Jump to content
  • Advertisement
Sign in to follow this  
ScopeDynamo

Backproplem

This topic is 4829 days old which is more than the 365 day threshold we allow for new replies. Please post a new topic.

If you intended to correct an error in the post then please contact us.

Recommended Posts

Hi, I've been coding a small Neural Network lib, and it worked up to the point where I coded backprop 'training'. The neural net is a standard 3-layer feed-forward network (Input Layer -> Hidden Layer -> Output Layer). All easy to grasp, as I wasn't going for extreme efficiency — just trying to get it running at the moment. Here's the code,
using namespace std;
// Uniform random double in the closed range [from, fin].
// Relies on rand(); seed with srand() once at program start.
double rndn( double from,double fin)
{
    const double unit = (double) rand() / RAND_MAX;  // unit is in [0, 1]
    return from + unit * (fin - from);
};

// Uniform random double in [0, 1].
double rndn()
{
    return (double) rand() / (double) RAND_MAX;
};
// Logistic activation: squashes any real net input into (0, 1).
// `response` controls the steepness of the curve (default 1).
double sigmoid(double netinput, double response=1)
{
    const double scaled = -netinput / response;
    return 1.0 / (1.0 + exp(scaled));
}


// One neuron of a fully connected feed-forward layer.
// Incoming weight w[i] pairs with input neuron in[i]; up to 255 inputs.
// NOTE(review): the constructor does NOT call init(), so weights stay
// uninitialised until init() is called externally — confirm callers do so.
class Neuron
{
public:

    ~Neuron()
    {}

    Neuron(){
        ic=0;
        oc=0;
        output=0;
        echoOut=false;
    };

    // Add `val` to one incoming connection weight (used by backprop).
    void modWeight( int connection,double val){
        w[connection]+=val;
    };

    // Register `to` as a downstream neuron and register this neuron as
    // one of `to`'s inputs.
    void connectNeuron( Neuron *to){
        out.push_back( to );
        oc++;
        to->addInput( this );  // redundant (Neuron*) cast removed
    };

    // Reset connection counts and randomise every weight into [-1, 1].
    void init(){
        oc=0;
        ic=0;
        for(int j=0;j<255;j++)
        {
            w[j]=-1+(rndn()*2);
        };
        output =0;
    };

    double getWeight( int connection =0){
        return w[connection];
    };

    // Compute this neuron's activation: the weighted sum of all input
    // neurons' outputs, squashed by the sigmoid.
    void sumInputs()
    {
        double weight =0;
        for(int i=0;i<ic;i++)
        {
            // FIX: original read `w * in->getOutput()` — the [i]
            // subscripts were stripped by the forum software and the
            // expression did not compile. Each input pairs with its
            // own weight.
            weight += w[i] * in[i]->getOutput();
        };
        // FIX: response was -1, which mirrors the sigmoid so the
        // activation DECREASES as net input grows; the backprop code
        // assumes the standard increasing sigmoid (response = +1).
        output = sigmoid(weight,1);
    };

    void fire(){
    };

    // Force the output value directly (used for input-layer neurons).
    void setOutput( double noutput)
    {
        output=noutput;
    };

    double getOutput()
    {
        if(echoOut==true){
            cout<<"Echo Neuron called."<<endl;
            cout<<"Output is:"<<output<<endl;
        };
        return output;
    };

    void incInput(){
        ic++;
    };

    // NOTE: no bounds check — more than 255 inputs would overrun w[].
    void addInput( Neuron *inN){
        in.push_back( inN );
        ic++;
        cout <<"Added input>"<<ic<<endl;
    };

    // Enable/disable debug echo in getOutput().
    void echoOutput( bool enable=true){
        echoOut=enable;
    };

    double getErr(){
        return err;
    };
    void setErr( double val){
        err=val;
    };
    double getDelta(){
        return delta;
    };
    void setDelta(double ndelta){
        delta = ndelta;
    };

private:
    vector<Neuron *>in;   // upstream neurons, index-paired with w[]
    vector<Neuron *>out;  // downstream neurons (bookkeeping only)
    double w[255];        // incoming connection weights
    double output;        // last computed activation
    int ic,oc;            // input / output connection counts
    bool echoOut;         // debug echo flag
    double err;           // spare error slot (unused by NeuralNet)
    double delta;         // backprop error term
};
// TODO (Antony#1#): Try increasing network by adding random neurons in areas of high activity.
// Wraps one input-layer Neuron so client code can feed values into
// the network.
class NInput
{
public:
    // Turn on debug echo for the wrapped neuron.
    void addWatch(){
        dat->echoOutput( true );
    };

    // Feed an external value into the network by forcing the wrapped
    // neuron's output.
    void value( double input )
    {
        dat->setOutput( input );
    }

    // Access the wrapped neuron.
    Neuron *getNeuron(){
        return dat;
    };

    // Replace the wrapped neuron; ownership stays with the caller.
    void setNeuron( Neuron *an ){
        dat = an;
    };

protected:
    Neuron *dat;  // the underlying input neuron (not owned)

};

// Wraps one output-layer Neuron and stores the training target for it.
class NOutput
{
public:
    // FIX: `desired` and the pointer members were left uninitialised,
    // so getTarget() returned garbage until target() was first called.
    // (The later revision of this class adds the same constructor.)
    NOutput(){
        dat = 0;
        name = 0;
        desired = 0;
    };

    // Current network output for this node.
    double value()
    {
        return dat->getOutput();
    }
    Neuron *getNeuron(){
        return dat;
    };
    void setNeuron(Neuron *an){
        dat = an;
    };
    // Set the desired (teacher) value used by backprop.
    void target( double val){
        desired = val;
    };
    double getTarget(){
        return desired;
    };

    protected:
    Neuron *dat;        // wrapped output neuron (not owned)
    const char *name;   // unused label slot
    double desired;     // training target
};



// A simple 3-layer feed-forward network (input -> hidden -> output),
// trained by one step of backpropagation per cycle().
class NeuralNet
{
public:
   NeuralNet(){
       ic=oc=hc=0;
   };
   // FIX: every Neuron/NInput/NOutput was allocated with `new` and
   // never freed; release them here.
   ~NeuralNet(){
       for(int j=0;j<ic;j++){ delete inputs[j]->getNeuron(); delete inputs[j]; }
       for(int j=0;j<hc;j++){ delete hidden[j]; }
       for(int j=0;j<oc;j++){ delete outputs[j]->getNeuron(); delete outputs[j]; }
   };

    // Create the (single) hidden layer.
    void addHiddenLayer( int neurons=8)
    {
        for(int j=0;j<neurons;j++)
        {
            hidden.push_back( new Neuron);
            // FIX: init() was commented out, leaving every hidden
            // neuron's weights uninitialised.
            hidden[j]->init();
        };
        hc=neurons;
    };

    // Add one input node (a Neuron wrapped in an NInput).
    NInput* addInput()
    {
        NInput *out = new NInput;
        out->setNeuron( new Neuron);
        out->getNeuron()->init();
        inputs.push_back( out );
        ic++;
        return out;
    };

    // Add one output node (a Neuron wrapped in an NOutput).
    NOutput *addOutput()
    {
        NOutput *out = new NOutput;
        out->setNeuron( new Neuron);
        out->getNeuron()->init();
        outputs.push_back( out );
        oc++;
        return out;
    };

    // Fully connect every input to every hidden neuron, and every
    // hidden neuron to every output.
    void connectNetwork()
    {
        for(int j=0;j<ic;j++)
        {
            for(int k=0;k<hc;k++)
            {
                inputs[j]->getNeuron()->connectNeuron( hidden[k] );
            };
        };
        for(int j=0;j<hc;j++)
        {
            for(int k=0;k<oc;k++)
            {
                hidden[j]->connectNeuron( outputs[k]->getNeuron() );
            };
        };
    };

    // One forward pass followed by one backprop weight update.
    // Targets must already be set via NOutput::target().
    void cycle(){
        // Forward pass: hidden layer first, then output layer.
        for(int j=0;j<hc;j++){
            hidden[j]->sumInputs();
        };
        for(int j=0;j<oc;j++){
            outputs[j]->getNeuron()->sumInputs();
        };

        // FIX: apply a learning rate; the original added the raw
        // gradient, which makes training unstable.
        const double learning_rate = 0.5;

        // Output deltas: (target - actual) * sigmoid'(actual),
        // where sigmoid'(actual) = actual * (1 - actual).
        for(int j=0;j<oc;j++){
            double target=outputs[j]->getTarget();
            double actual=outputs[j]->value();
            double error=(target - actual) * actual * (1 - actual);
            outputs[j]->getNeuron()->setDelta( error );
        };

        // Hidden deltas: back-propagate output deltas through the
        // hidden->output weights, times sigmoid'(hidden output).
        // Must be computed BEFORE the output weights are changed.
        for(int j=0;j<hc;++j){
            double delta_sum=0;
            for (int k=0;k<oc;++k)
            {
                delta_sum+=outputs[k]->getNeuron()->getDelta()*outputs[k]->getNeuron()->getWeight( j );
            }
            hidden[j]->setDelta( delta_sum*hidden[j]->getOutput()*(1-hidden[j]->getOutput()) );
        };

        // FIX: the hidden->output weight update was commented out in the
        // original, so the output layer never learned — which is exactly
        // why the outputs never changed during training.
        for(int j=0;j<hc;++j){
            for (int k=0;k<oc;++k){
                outputs[k]->getNeuron()->modWeight( j,
                    learning_rate*outputs[k]->getNeuron()->getDelta()*hidden[j]->getOutput() );
            }
        };

        // Input->hidden weight update.
        for(int j=0;j<ic;++j){
            for(int k=0;k<hc;++k){
                hidden[k]->modWeight( j,
                    learning_rate*hidden[k]->getDelta()*inputs[j]->getNeuron()->getOutput() );
            };
        };
    };

protected:
    vector<NInput *> inputs;   // input wrappers (owned)
    int ic;                    // input count
    vector<Neuron *> hidden;   // hidden-layer neurons (owned)
    int hc;                    // hidden count
    vector<NOutput*> outputs;  // output wrappers (owned)
    int oc;                    // output count
private:

};

#endif // NEURALNET_H

And here's the bit that specifically updates the net and the backprop code. (Cut and pasted from above, not in addition)

// One forward pass plus one backprop step (excerpt of NeuralNet::cycle).
void cycle(){
        //Feed network.
        // Forward pass: hidden layer first, then output layer.
		for(int j=0;j<hc;j++){
            hidden[j]->sumInputs();
		//	hidden[j]->modWeight( 0,5 );
		};
        for(int j=0;j<oc;j++){
            outputs[j]->getNeuron()->sumInputs();
        };
		//Calculate output neurons error.
		// Output delta = (target - actual) * sigmoid'(actual),
		// where sigmoid'(actual) = actual * (1 - actual).
		for(int j=0;j<oc;j++){
			double error;
		    double target=outputs[j]->getTarget();
			double actual=outputs[j]->value();
			error=(target - actual) * actual * (1 - actual);
			//error = actual * ( 1-actual)*(target-actual);
			outputs[j]->getNeuron()->setDelta( error );
		};
		double delta_sum=0;

		// Hidden deltas: sum each output delta weighted by its
		// hidden->output connection, times sigmoid'(hidden output).
		// NOTE(review): the hidden->output weight update is commented out
		// below, so the output layer never learns — this is why the
		// network's outputs never change during training.
		for(int j=0;j<hc;++j){
			delta_sum=0;
			for (int k=0;k<oc;++k)
			{
				delta_sum+=outputs[k]->getNeuron()->getDelta()*outputs[k]->getNeuron()->getWeight( j );
			//	outputs[k]->getNeuron()->modWeight( j, outputs[k]->getNeuron()->getDelta()*outputs[k]->getNeuron()->getOutput() );
			}
			//outputs[k
			
			//	hidden_weight[middlenode][outputnode]+=delta_output[outputnode]*out_hidden[middlenode];
			cout<<"Delta Sum was:"<<delta_sum<<endl;


			//	delta_hidden[middlenode]=delta_sum*out_hidden[middlenode]*(1-out_hidden[middlenode]);
			hidden[j]->setDelta( delta_sum*hidden[j]->getOutput()*(1-hidden[j]->getOutput()) );
		};

		// Input->hidden weight update.  NOTE(review): no learning rate
		// is applied here, which can make training unstable.
		for(int j=0;j<ic;++j){
			for(int k=0;k<hc;++k){
				//hidden[k]->modWeight( j,2 );
				hidden[k]->modWeight( j,hidden[k]->getDelta()*inputs[j]->getNeuron()->getOutput() );
			};
		};

  };
Am I doing something wrong? As is the outputs NEVER change, no matter how much I fiddle with the hidden layer's weights.

Share this post


Link to post
Share on other sites
Advertisement
Hi,

Thanks, I converted the backprop-specific part of your code to my net engine and it worked perfectly. (As far as I can tell anyway — I'm no expert.)


Here's my net class again with your code converted, in case you or anyone else finds it helpful. I wish I had a simple program like this a few days ago when I started out :)



using namespace std;
// Uniform random double in the closed range [from, fin].
// FIX: the original called srand((int)GetTickCount()) on EVERY call.
// Reseeding per call means successive calls within the same tick all
// return the identical value, and GetTickCount() is Windows-only.
// Seed once at program start (e.g. srand(time(0))) instead.
double rndn( double from,double fin)
{
    double rndr =(double) rand()/RAND_MAX;
    return from+(fin-from)*rndr;
};

// Uniform random double in [0, 1].
// FIX: per-call srand(GetTickCount()) removed — see rndn(from, fin);
// reseeding every call breaks randomness and is Windows-only.
double rndn()
{
    return (double)rand()/RAND_MAX;
};
// Logistic activation: squashes any real net input into (0, 1).
// `response` controls the steepness of the curve (default 1).
double sigmoid(double netinput, double response=1)
{
    const double scaled = -netinput / response;
    return 1.0 / (1.0 + exp(scaled));
}


// One neuron of a fully connected feed-forward layer.
// w[0..ic-1] pair with the input neurons in[0..ic-1]; slot w[ic] is the
// bias weight, driven by a constant input of 1.
class Neuron
{
public:

    ~Neuron()
    {}

    Neuron(){
        ic=0;
        oc=0;
        output=0;
        echoOut=false;
        init();        // randomise weights on construction
        outmode=0;     // default: linear (identity) output
    };

    // Add `val` to one incoming connection weight (used by backprop).
    void modWeight( int connection,double val){
        w[connection]+=val;
    };

    // Register `to` as a downstream neuron and register this neuron as
    // one of `to`'s inputs.
    void connectNeuron( Neuron *to){
        out.push_back( to );
        oc++;
        to->addInput( this );  // redundant (Neuron*) cast removed
    };

    // Reset connection counts and randomise every weight into [-1, 1].
    void init(){
        oc=0;
        ic=0;
        for(int j=0;j<255;j++)
        {
            w[j]=-1.0+rndn()*2.0;
        };
        output =0;
    };

    double getWeight( int connection =0){
        return w[connection];
    };

    // Net input = sum(w[i] * in[i]) + bias weight w[ic] * 1, then either
    // sigmoid (outmode == 1) or linear output.
    void sumInputs()
    {
        double weight =0;
        for(int i=0;i<ic+1;i++)
        {
            if(i<ic){
                // FIX: the forum software stripped the [i] subscripts
                // from `w * in->getOutput()`; restored here.
                weight += w[i] * in[i]->getOutput();
            }else{
                weight += w[ic] * 1;   // bias term
            };
        };
        // FIX: original wrote `if(outmode=1)` — an ASSIGNMENT — so every
        // neuron always took the sigmoid branch and the linear output
        // layer the backprop code assumes never existed.
        if(outmode==1){
            output = sigmoid(weight,1);
        }else{
            output = weight;
        };
    };

    void fire(){
    };

    // Force the output value directly (used for input-layer neurons).
    void setOutput( double noutput)
    {
        output=noutput;
    };

    // 1 = sigmoid activation, anything else = linear.
    void outputMode(int mode){
        outmode=mode;
    };

    double getOutput()
    {
        if(echoOut==true){
            cout<<"Echo Neuron called."<<endl;
            cout<<"Output is:"<<output<<endl;
        };
        return output;
    };

    void incInput(){
        ic++;
    };

    // NOTE: no bounds check — more than 254 inputs would overrun w[]
    // (the last slot, w[ic], is reserved for the bias).
    void addInput( Neuron *inN){
        in.push_back( inN );
        ic++;
        cout <<"Added input>"<<ic<<endl;
    };

    // Enable/disable debug echo in getOutput().
    void echoOutput( bool enable=true){
        echoOut=enable;
    };

    double getErr(){
        return err;
    };
    void setErr( double val){
        err=val;
    };
    double getDelta(){
        return delta;
    };
    void setDelta(double ndelta){
        delta = ndelta;
    };

private:
    vector<Neuron *>in;   // upstream neurons, index-paired with w[]
    vector<Neuron *>out;  // downstream neurons (bookkeeping only)
    double w[255];        // w[0..ic-1] input weights, w[ic] bias
    double output;        // last computed activation
    int ic,oc;            // input / output connection counts
    bool echoOut;         // debug echo flag
    double err;           // spare error slot (unused by NeuralNet)
    double delta;         // backprop error term
    int outmode;          // activation selector (1 = sigmoid)
};

// TODO (Antony#1#): Try increasing network by adding random neurons in areas of high activity.
// Wraps one input-layer Neuron: push values in, or read them back out.
class NInput
{
public:
    // Turn on debug echo for the wrapped neuron.
    void addWatch(){
        dat->echoOutput( true );
    };

    // Feed an external value into the network by forcing the wrapped
    // neuron's output.
    void value( double input )
    {
        dat->setOutput( input );
    }

    // Read back the value currently held by the input neuron.
    double getValue(){
        return dat->getOutput();
    };

    // Access the wrapped neuron.
    Neuron *getNeuron(){
        return dat;
    };

    // Replace the wrapped neuron; ownership stays with the caller.
    void setNeuron( Neuron *an ){
        dat = an;
    };

protected:
    Neuron *dat;  // the underlying input neuron (not owned)

};

// Wraps one output-layer Neuron plus the desired (teacher) value for it.
class NOutput
{
public:
    NOutput(){
        desired = 0;
    };
    ~NOutput(){
    };

    // Current network output for this node.
    double value()
    {
        return dat->getOutput();
    }

    // Access the wrapped neuron.
    Neuron *getNeuron(){
        return dat;
    };

    // Replace the wrapped neuron; ownership stays with the caller.
    void setNeuron( Neuron *an ){
        dat = an;
    };

    // Set the training target used by backprop.
    void target( double val ){
        desired = val;
    };

    double getTarget(){
        return desired;
    };

protected:
    Neuron *dat;        // wrapped output neuron (not owned)
    const char *name;   // unused label slot
    double desired;     // training target
};



// 3-layer feed-forward net: sigmoid hidden layer, linear output layer,
// trained by one backprop step per cycle().  Hidden/output neurons use
// their last weight slot as a bias (constant input of 1).
// NOTE: the local gradient arrays below are 255 wide, so ic/hc/oc are
// assumed < 255 — matching Neuron's fixed weight array.
class NeuralNet
{
public:
    NeuralNet(){
        ic=oc=hc=0;
        doBackprop=true;
    };
    // FIX: every Neuron/NInput/NOutput was allocated with `new` and
    // never freed; release them here.
    ~NeuralNet(){
        for(int j=0;j<ic;j++){ delete inputs[j]->getNeuron(); delete inputs[j]; }
        for(int j=0;j<hc;j++){ delete hidden[j]; }
        for(int j=0;j<oc;j++){ delete outputs[j]->getNeuron(); delete outputs[j]; }
    };

    // Create the (single) hidden layer with sigmoid activation.
    void addHiddenLayer( int neurons=8)
    {
        hc=0;
        for(int j=0;j<neurons;j++)
        {
            hidden.push_back( new Neuron);
            hidden[j]->outputMode( 1 );   // sigmoid activation
            hc++;
        };
    };

    // Add one input node (a Neuron wrapped in an NInput).
    NInput* addInput()
    {
        NInput *out = new NInput;
        out->setNeuron( new Neuron);
        out->getNeuron()->init();
        out->getNeuron()->outputMode( 1);
        inputs.push_back( out );
        ic++;
        return out;
    };

    // Add one output node; output neurons keep the default LINEAR
    // activation, which the gradient code below assumes.
    NOutput *addOutput()
    {
        NOutput *out = new NOutput;
        out->setNeuron( new Neuron);
        out->getNeuron()->init();
        outputs.push_back( out );
        oc++;
        return out;
    };

    // Fully connect every input to every hidden neuron, and every
    // hidden neuron to every output.
    void connectNetwork()
    {
        for(int j=0;j<ic;j++)
        {
            for(int k=0;k<hc;k++)
            {
                inputs[j]->getNeuron()->connectNeuron( hidden[k] );
            };
        };
        for(int j=0;j<hc;j++)
        {
            for(int k=0;k<oc;k++)
            {
                hidden[j]->connectNeuron( outputs[k]->getNeuron() );
            };
        };
    };

    // One forward pass plus (when doBackprop) one backprop weight
    // update.  Returns the summed absolute output error.
    // FIX: the forum software stripped the [i] subscripts throughout
    // this method (`yout = 0`, `outputs->getNeuron()`, `hout=0`,
    // `hidden->getWeight(j)`, ...); they are restored below.
    double cycle(){
        // Forward pass: hidden layer first, then output layer.
        for(int j=0;j<hc;j++){
            hidden[j]->sumInputs();
        };
        for(int j=0;j<oc;j++){
            outputs[j]->getNeuron()->sumInputs();
        };

        double avg_err=0;
        // Output deltas for a LINEAR output layer: just target - actual.
        for(int j=0;j<oc;j++){
            double target=outputs[j]->getTarget();
            double actual=outputs[j]->value();
            double error = target-actual;
            avg_err += fabs( error );   // sqrt(e*e) is just |e|
            outputs[j]->getNeuron()->setDelta( error );
        };
        // FIX: was `return 0`, which threw away the error just computed.
        if(doBackprop==false) return avg_err;

        double yout[255];   // dE/dnet for each output neuron
        double hout[255];   // dE/dnet for each hidden neuron
        double hout2[255];  // deltas back-propagated to each hidden neuron

        // Output activation is linear, so its derivative is 1 and the
        // gradient is simply the stored delta.  (The original also
        // recomputed each output's net input here, then overwrote it —
        // that dead loop is removed.)
        for(int i=0;i<oc;i++){
            yout[i] = 1*outputs[i]->getNeuron()->getDelta();
        };

        // Hidden-layer sigmoid derivatives, evaluated at each neuron's
        // net input (weights * inputs + bias at slot ic).
        for (int i=0;i<hc;i++)
        {
            hout[i]=0;
            for (int j=0;j<ic+1;j++)
            {
                if(j<ic){
                    hout[i] += hidden[i]->getWeight(j)*inputs[j]->getNeuron()->getOutput();
                }else{
                    hout[i] += hidden[i]->getWeight(j)*1;   // bias
                };
            }
            // d/dx [1/(1+exp(-x))] = exp(-x) / (1+exp(-x))^2
            hout[i] = exp( -hout[i] ) / ((1+exp( -hout[i])) *(1+ exp(-hout[i])));
        }

        // Chain rule: multiply each derivative by the delta flowing
        // back from the output layer.
        for (int i=0;i<hc;i++)
        {
            hout2[i]=0;
            for (int j=0;j<oc;j++)
            {
                hout2[i] += yout[j]*outputs[j]->getNeuron()->getWeight(i);
            }
            hout[i] = hout[i]*hout2[i];
        }

        double learning_rate = 0.01;
        // Hidden->output weight updates (slot hc is the bias weight).
        for (int i=0;i<oc;i++)
        {
            for (int j=0;j<hc+1;j++)
            {
                if(j<hc){
                    outputs[i]->getNeuron()->modWeight( j,learning_rate*yout[i]*hidden[j]->getOutput() );
                }else{
                    outputs[i]->getNeuron()->modWeight( j,learning_rate*yout[i]*1 );
                };
            }
        };

        // Input->hidden weight updates (slot ic is the bias weight).
        for (int i=0;i<hc;i++)
        {
            for (int j=0;j<ic+1;j++)
            {
                if(j<ic){
                    hidden[i]->modWeight(j,learning_rate*hout[i]*inputs[j]->getNeuron()->getOutput() );
                }else{
                    hidden[i]->modWeight(j,learning_rate*hout[i]*1);
                };
            }
        }

        return avg_err;
    };

    // Toggle the training step; the forward pass in cycle() still runs.
    void learn( bool enable = true ){
        doBackprop = enable;
    };

protected:
    bool doBackprop;           // run the backprop step in cycle()?
    vector<NInput *> inputs;   // input wrappers (owned)
    int ic;                    // input count
    vector<Neuron *> hidden;   // hidden-layer neurons (owned)
    int hc;                    // hidden count
    vector<NOutput*> outputs;  // output wrappers (owned)
    int oc;                    // output count
private:

};

Share this post


Link to post
Share on other sites
Sign in to follow this  

  • Advertisement
×

Important Information

By using GameDev.net, you agree to our community Guidelines, Terms of Use, and Privacy Policy.

We are the game development community.

Whether you are an indie, hobbyist, AAA developer, or just trying to learn, GameDev.net is the place for you to learn, share, and connect with the games industry. Learn more About Us or sign up!

Sign me up!