单层感知器学习后输出错误结果



我需要帮助调试我的单层感知器:它使用 sigmoid 作为传递函数,并用反向传播算法进行学习。我想实现一个简单的神经网络来计算 a AND b(逻辑与)。问题是训练结束后,无论输入什么(例如 0 和 0),网络总是输出约 0.99。我把代码看了三遍,仍不明白为什么训练后返回错误的结果。请帮帮我。

Neuron.java:

public class Neuron {
  // Last computed activation of this neuron (set by evaluate()).
  public double value;
  // One weight per input connection.
  public double[] weights;
  // Additive bias applied before the activation function.
  public double bias;
  // Error term used by backpropagation (set by the network).
  public double deltas;

/**
 * Creates a neuron with {@code nb_entree} input connections.
 * Weights, bias, value and deltas are initialised to tiny random
 * values (near zero) so training starts from an almost-neutral state.
 *
 * @param nb_entree number of inputs feeding this neuron
 */
public Neuron(int nb_entree){
    weights = new double[nb_entree];
    value =  Math.random() / 10000000000000.0;
    bias = Math.random() / 10000000000000.0;
    deltas = Math.random() / 10000000000000.0;
    for(int i = 0 ; i < weights.length ; i++){
        weights[i] = Math.random() / 10000000000000.0;
    }
}
/***
 * Evaluates this neuron with a sigmoid activation function.
 * Computes the weighted sum of the inputs plus the bias, then applies
 * sigmoid(x) = 1 / (1 + e^(-x)).
 *
 * @param input list of input values (must match weights.length)
 * @return the result of the sigmoid function, also stored in {@code value}
 */
public double evaluate(double[] input){
    double x = 0.0;
    for(int i = 0 ; i < input.length ; i++){
        x += input[i] * weights[i];
    }
    x += bias;
    // FIX: the exponent must be negated. The original code used e^(+x),
    // which mirrors the sigmoid (outputs ~1 for large negative sums),
    // so the network learned the inverse of the target function.
    value = 1 / (1 + Math.exp(-x));
    return value;
}
// Resets the stored activation of this neuron.
protected void delete(){
    value = 0.0;
}
}

NeuralNetwork.java:

public class NeuralNetwork {
  // Hidden layer and output layer neurons.
  public Neuron[] neurons_hidden;
  public Neuron[] neurons_output;
  // Step size used when adjusting weights and biases.
  public double rate_learning;
  // Layer sizes, cached from the constructor arguments.
  public int nb_hidden;
  public int nb_output;

/**
 * Builds a two-layer network (one hidden layer, one output layer).
 *
 * @param nb_input number of inputs fed to each hidden neuron
 * @param nb_hid   number of hidden neurons
 * @param nb_out   number of output neurons
 * @param rate     learning rate for backpropagation
 */
public NeuralNetwork(int nb_input, int nb_hid, int nb_out, double rate){
    nb_hidden = nb_hid;
    nb_output = nb_out;
    rate_learning = rate;
    neurons_hidden = new Neuron[nb_hidden];
    neurons_output = new Neuron[nb_output];
    // Hidden neurons each see the raw inputs.
    for(int h = 0 ; h < nb_hidden ; h++){
        neurons_hidden[h] = new Neuron(nb_input);
    }
    // Output neurons each see every hidden activation.
    for(int o = 0 ; o < nb_output ; o++){
        neurons_output[o] = new Neuron(nb_hidden);
    }
}

/**
 * Forward pass: propagates {@code input} through the hidden layer,
 * then through the output layer.
 *
 * @param input the input vector
 * @return the activations of the output layer
 */
public double[] evaluate(double[] input){
    // Clear the stored activations from any previous pass.
    for(Neuron hidden : neurons_hidden){
        hidden.delete();
    }
    for(Neuron out : neurons_output){
        out.delete();
    }

    double[] hiddenOut = new double[nb_hidden];
    for(int h = 0 ; h < nb_hidden ; h++){
        hiddenOut[h] = neurons_hidden[h].evaluate(input);
    }
    double[] result = new double[nb_output];
    for(int o = 0 ; o < nb_output ; o++){
        result[o] = neurons_output[o].evaluate(hiddenOut);
    }
    return result;
}

/**
 * One step of backpropagation on a single training example.
 *
 * @param input  the training input vector
 * @param output the expected (target) output vector
 * @return the mean absolute error over all outputs for this example
 */
public double backPropagate(double[] input, double[] output){
    double[] predicted = evaluate(input);

    // Output-layer deltas: (target - actual) * sigmoid'(actual),
    // where sigmoid'(y) = y - y^2 for an already-activated value y.
    for(int o = 0 ; o < nb_output ; o++){
        double diff = output[o] - predicted[o];
        neurons_output[o].deltas = diff * (predicted[o] - Math.pow(predicted[o], 2));
    }

    // Hidden-layer deltas: error backpropagated through the output
    // weights (still the pre-update weights — order matters here).
    for(int h = 0 ; h < nb_hidden ; h++){
        double backError = 0.0;
        for(int o = 0 ; o < nb_output ; o++){
            backError += neurons_output[o].deltas * neurons_output[o].weights[h];
        }
        double act = neurons_hidden[h].value;
        neurons_hidden[h].deltas = backError * (act - Math.pow(act, 2));
    }

    // Adjust output-layer weights and biases.
    for(int o = 0 ; o < nb_output ; o++){
        Neuron out = neurons_output[o];
        for(int h = 0 ; h < nb_hidden ; h++){
            out.weights[h] += rate_learning * out.deltas * neurons_hidden[h].value;
        }
        out.bias += rate_learning * out.deltas;
    }

    // Adjust hidden-layer weights and biases.
    for(int h = 0 ; h < nb_hidden ; h++){
        Neuron hid = neurons_hidden[h];
        for(int in = 0 ; in < input.length ; in++){
            hid.weights[in] += rate_learning * hid.deltas * input[in];
        }
        hid.bias += rate_learning * hid.deltas;
    }

    // Mean absolute error for this training example.
    double totalError = 0.0;
    for(int o = 0 ; o < output.length ; o++){
        totalError += Math.abs(predicted[o] - output[o]);
    }
    return totalError / output.length;
}
}

Test.java:

public class Test {
/**
 * Trains a 2-2-1 network on random samples of the logical-AND truth
 * table, then queries it once with (0, 0).
 */
public static void main(String[] args) {
    NeuralNetwork net = new NeuralNetwork(2, 2, 1, 0.6);

    /* Learning: 10000 random AND examples */
    for(int step = 0 ; step < 10000 ; step++)
    {
        double a = Math.round(Math.random());
        double b = Math.round(Math.random());
        double[] sample = new double[]{a, b};
        // Target is 1 only when both inputs are 1 (logical AND).
        double[] expected = new double[]{(a == b && a == 1) ? 1.0 : 0.0};
        System.out.println(sample[0]+" and "+sample[1]+" = "+expected[0]);
        double stepError = net.backPropagate(sample, expected);
        System.out.println("Error at step "+step+" is "+stepError);
    }
    System.out.println("Learning finish!");

    /* Test: query the trained network on (0, 0) */
    double[] query = new double[]{0.0, 0.0};
    double[] answer = net.evaluate(query);
    System.out.println(query[0]+" and "+query[1]+" = "+answer[0]+"");
}
}

Thanks to help me

您的 sigmoid 函数不正确:指数应当取负号,即 e 的 -x 次方:

1 / (1 + Math.pow(Math.E, -x))

我不确定这是否是唯一的错误。

另外,对于逻辑与(AND)这种线性可分的问题,你只需要一层(不需要隐藏层)。

最后,你在反向传播方法中单独处理偏置。可以通过增加一个恒为 1 的输入节点、把偏置当作该节点的权重来简化。参见 https://en.wikipedia.org/wiki/Perceptron 中的定义。

最新更新