// $Id$
//
// File: nnann_neuron.h
// Created: Tue Mar 22 23:31:43 EDT 2011
// Creator: davidl (on Darwin Amelia.local 9.8.0 i386)
//

#ifndef _nnann_neuron_
#define _nnann_neuron_

#include <cstddef>
#include <vector>

#include "nnann_function.h"

// NOTE(review): the three original #include directives lost their targets
// (angle-bracket content appears to have been stripped, along with the
// std::vector template arguments below). Reconstructed from use: the member
// containers need <vector>, NULL needs <cstddef>, and CalculateOutput()/
// CalculateInputError() call through an nnann_function*, which requires the
// full class definition. Confirm the project header name against the repo.

// The nnann_neuron class is implemented completely in the header.
//
// A single neuron has connections to the previous layer (inputs) and
// connections to the next layer (outputs) unless, of course, it is in
// the first or last layer.
//
// Connections are contained in the form of nnann_weight objects. These
// are added via the AddInput() and AddOutput() methods. The ClearInputs()
// and ClearOutputs() methods are used to empty the lists of inputs and
// outputs, generally so they can be re-filled. Note that the nnann_weight
// objects also keep pointers to the neurons they connect so they will
// need to be updated independently of any calls to ClearInputs() and
// ClearOutputs().
//
// The output of the neuron is made by:
// 0. Calling ResetInput() for all neurons in the ANN
// 1. Setting the output values of the first layer via SetOutput(output);
// 2. Reset the input values to 0 for all other layers via ResetInput()
// 3. Propagate inputs layer to layer via nnann_weight::Propagate()
//    (n.b. this will only add the weight scaled value of a single connection
//    to the next layer's input.)
// 4. Apply the activation function to calculate the output of all neurons
//    in the layer by calling CalculateOutput()
// 5. Access the output of the neuron with GetOutput().
//
// It is important to recognize the difference between the CalculateOutput()
// method and the GetOutput() method. The former can be costly and should
// be done once during the forward propagation. The latter should be very
// fast and can be called multiple times, but only after the former has
// been called.
class nnann_weight; class nnann_neuron{ public: nnann_neuron():activation_function(NULL),output(0.0),bias(0.0){} virtual ~nnann_neuron(){} void AddInput(nnann_weight *weight){inputs.push_back(weight);} void AddOutput(nnann_weight *weight){outputs.push_back(weight);} void AddToInput(double val){input += val;}; void AddToOutputError(double val){output_err += val;}; double CalculateOutput(void){return output = activation_function->f(input);} double CalculateInputError(void){return input_err=activation_function->InputError(input, output+output_err);} void ClearInputs(void){inputs.clear();} void ClearOutputs(void){outputs.clear();} nnann_function* GetActivationFunction(void) const {return activation_function;} double GetBias(void){return bias;} std::vector GetInputs(void) const {return inputs;} double GetInputError(void) const {return input_err;} double GetOutput(void) const {return output;} double GetOutputError(void) const {return output_err;} std::vector GetOutputs(void) const {return outputs;} void ResetInput(void){input=bias;} void ResetOutputError(void){output_err=0.0;} void SetActivationFunction(nnann_function *fcn){activation_function = fcn;} void SetBias(double bias){this->bias = bias;} void SetInput(double input){this->input = input;} void SetOutput(double output){this->output = output;} void SetOutputError(double err){output_err=err; CalculateInputError();} protected: std::vector inputs; std::vector outputs; nnann_function *activation_function; double output; double output_err; // Only set when training and correct output is known double input; double input_err; // Backpropagated from output_err and output double bias; private: }; #endif // _nnann_neuron_