author     Georg Holzmann <grholzi@users.sourceforge.net>  2005-07-12 14:40:21 +0000
committer  Georg Holzmann <grholzi@users.sourceforge.net>  2005-07-12 14:40:21 +0000
commit     7c3c5dd0f8d7089bd50282e9dcd56e36798e18cf (patch)
tree       156c00e6213b8c48e520d9eeee499ddce4c649be
parent     94d966b50ab1a09d8650b7c693e9223273a44acf (diff)
initial commit of pix_recNN
svn path=/trunk/externals/grh/; revision=3320
-rwxr-xr-x  pix_recNN/Makefile             46
-rwxr-xr-x  pix_recNN/NNActivation.h       78
-rwxr-xr-x  pix_recNN/NNException.h        49
-rwxr-xr-x  pix_recNN/NNet.h              636
-rwxr-xr-x  pix_recNN/Neuron.cpp          169
-rwxr-xr-x  pix_recNN/Neuron.h            191
-rwxr-xr-x  pix_recNN/RecurrentNeuron.cpp 226
-rwxr-xr-x  pix_recNN/RecurrentNeuron.h   149
-rwxr-xr-x  pix_recNN/gpl.txt             346
-rwxr-xr-x  pix_recNN/help-pix_recNN.pd   146
-rwxr-xr-x  pix_recNN/pix_recNN.cpp       423
-rwxr-xr-x  pix_recNN/pix_recNN.h         204
-rwxr-xr-x  pix_recNN/readme.txt           27
13 files changed, 2690 insertions, 0 deletions
diff --git a/pix_recNN/Makefile b/pix_recNN/Makefile
new file mode 100755
index 0000000..ab880e8
--- /dev/null
+++ b/pix_recNN/Makefile
@@ -0,0 +1,46 @@
+PD-PATH=/usr/lib/pd
+PD-SCR=/usr/include
+
+# location of the GEM sources and Gem.pd_linux:
+GEM-SCR=/home/Georg/pd-cvs/gem/Gem/src
+GEM-LIB=$(PD-PATH)/extra/Gem.pd_linux
+
+
+CC = g++
+LD = g++
+INCLUDE=-I$(PD-SCR) -I$(GEM-SCR) -I./src
+LIB=-lc -lm -L$(GEM-LIB)
+CC_FLAGS = -c -Wall -g -O3 -mmmx -fno-builtin -funroll-loops -ffast-math
+LD_FLAGS = --export-dynamic -shared -o
+
+
+TARGET=pix_recNN.pd_linux
+OBJ=RecurrentNeuron.o Neuron.o pix_recNN.o
+#--------------------------------------------------------
+
+all: pd_linux
+
+pd_linux: $(TARGET)
+
+$(TARGET): $(OBJ)
+ $(LD) $(LD_FLAGS) $(TARGET) $(OBJ) $(LIB)
+ strip --strip-unneeded $(TARGET)
+ chmod 755 $(TARGET)
+
+pix_recNN.o: RecurrentNeuron.o pix_recNN.h pix_recNN.cpp NNet.h NNException.h
+ $(CC) $(CC_FLAGS) $(INCLUDE) pix_recNN.cpp
+
+
+RecurrentNeuron.o: RecurrentNeuron.cpp RecurrentNeuron.h Neuron.o NNActivation.h
+	$(CC) $(CC_FLAGS) $(INCLUDE) RecurrentNeuron.cpp
+
+Neuron.o: Neuron.cpp Neuron.h NNActivation.h
+	$(CC) $(CC_FLAGS) $(INCLUDE) Neuron.cpp
+
+#--------------------------------------------------------
+
+clean:
+ rm -f $(OBJ) $(TARGET)
+
+
+install:
+ cp -f $(TARGET) $(PD-PATH)/externs
+ cp -f *.pd $(PD-PATH)/doc/5.reference
diff --git a/pix_recNN/NNActivation.h b/pix_recNN/NNActivation.h
new file mode 100755
index 0000000..e91c046
--- /dev/null
+++ b/pix_recNN/NNActivation.h
@@ -0,0 +1,78 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// NNActivation.h
+//
+// all the activation functions of the neurons
+//
+// header file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef _INCLUDE_ACTIVATION_NET__
+#define _INCLUDE_ACTIVATION_NET__
+
+
+#include <math.h>
+
+namespace TheBrain
+{
+
+//------------------------------------------------------
+/* implementation of the different activation functions
+ * and their derivatives
+ */
+
+/* Linear activation function.
+ * span: -inf < y < inf
+ * y = x
+*/
+#define LINEAR 0
+
+/* Sigmoid activation function.
+ * span: 0 < y < 1
+ * y = 1/(1 + exp(-x)), y' = y*(1 - y)
+ */
+#define SIGMOID 1
+
+/* Symmetric sigmoid activation function, a.k.a. tanh.
+ * span: -1 < y < 1
+ * y = tanh(x) = 2/(1 + exp(-2*x)) - 1, y' = 1 - y*y
+*/
+#define TANH 2
+
+// linear function
+inline float act_linear(float value)
+{ return value; }
+
+// derivative of the linear function
+inline float act_linear_derive(float value)
+{ return 1; }
+
+// sigmoid function
+inline float act_sigmoid(float value)
+{ return (1.0f/(1.0f + exp(-value))); }
+
+// derivative of the sigmoid function
+// (expects the already activated output y, not x)
+inline float act_sigmoid_derive(float value)
+{ return (value * (1.0f - value)); }
+
+// tanh function
+inline float act_tanh(float value)
+{ return (2.0f/(1.0f + exp(-2.0f * value)) - 1.0f); }
+
+// derivative of the tanh function
+// (expects the already activated output y, not x)
+inline float act_tanh_derive(float value)
+{ return (1.0f - (value*value)); }
+
+
+} // end of namespace
+
+#endif // _INCLUDE_ACTIVATION_NET__
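
One detail that is easy to miss: the *_derive functions take the already
activated output y as their argument, not the raw input x (see the formulas
above, e.g. y' = y*(1 - y) for the sigmoid). A minimal sketch that checks
this against a finite difference, hypothetical test code, not part of this
commit:

    // check_activation.cpp -- hypothetical test, not in this commit
    #include <cstdio>
    #include "NNActivation.h"

    int main()
    {
      using namespace TheBrain;

      float x = 0.5f;
      float y = act_sigmoid(x);          // forward pass
      float dy = act_sigmoid_derive(y);  // derivative from the OUTPUT y

      // finite-difference approximation of d/dx sigmoid(x)
      float h = 1e-3f;
      float fd = (act_sigmoid(x+h) - act_sigmoid(x-h)) / (2.0f*h);

      std::printf("analytic: %f  finite difference: %f\n", dy, fd);
      return 0;
    }
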
diff --git a/pix_recNN/NNException.h b/pix_recNN/NNException.h
new file mode 100755
index 0000000..bcb7be5
--- /dev/null
+++ b/pix_recNN/NNException.h
@@ -0,0 +1,49 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// NNException.h
+//
+// the exception class for all the nets
+//
+// header file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef _INCLUDE_NNDEFINES_NET__
+#define _INCLUDE_NNDEFINES_NET__
+
+#include <string>
+
+using std::string;
+
+namespace TheBrain
+{
+
+//------------------------------------------------------
+/* the exception class for all the neural network stuff
+ */
+class NNExcept
+{
+ protected:
+ string message_;
+
+ public:
+ NNExcept(string message="")
+ { message_ = message; }
+ virtual ~NNExcept() { }
+
+ virtual string what()
+ { return message_; }
+};
+
+} // end of namespace TheBrain
+
+#endif //_INCLUDE_NNDEFINES_NET__
+
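
A minimal sketch of how the exception class is used, hypothetical code, not
part of this commit (inside the Pd external the message would typically be
forwarded to post()):

    // exception_demo.cpp -- hypothetical, not in this commit
    #include <cstdio>
    #include "NNException.h"

    void exception_demo()
    {
      try
      {
        throw TheBrain::NNExcept("You must first create the Net!");
      }
      catch(TheBrain::NNExcept &e)
      {
        std::printf("neural net error: %s\n", e.what().c_str());
      }
    }
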
diff --git a/pix_recNN/NNet.h b/pix_recNN/NNet.h
new file mode 100755
index 0000000..349688f
--- /dev/null
+++ b/pix_recNN/NNet.h
@@ -0,0 +1,636 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// class NNet
+//
+// this is a template for all the nets
+// (see NeuralNet documentation for more information)
+//
+// header file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+//
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef _INCLUDE_NEURAL_TEMPLATE_NET__
+#define _INCLUDE_NEURAL_TEMPLATE_NET__
+
+#include "NNActivation.h"
+#include "NNException.h"
+
+namespace TheBrain
+{
+
+template <class HiddNeuronType,class OutNeuronType>
+class NNet
+{
+ protected:
+
+  /* the number of output values
+   * this is automatically also the
+   * number of output neurons !
+   */
+  int output_val_;
+
+  /* the number of hidden neurons
+   * per output neuron
+   * (this net has one hidden layer,
+   * so the total number of hidden
+   * neurons is hidden_val_*output_val_)
+   */
+  int hidden_val_;
+
+  /* the number of input values per output neuron
+   * (so the total number of input values
+   * is input_val_*output_val_)
+   */
+  int input_val_;
+
+  /* the memory of the output layer
+   * if you use a recurrent neuron, this
+   * determines how many output values the
+   * recurrent neurons can remember
+   * these values are fed back as new input
+   */
+  int memory_out_;
+
+  /* the memory of the hidden layer
+   * if you use a recurrent neuron, this
+   * determines how many output values the
+   * recurrent neurons can remember
+   * these values are fed back as new input
+   */
+  int memory_hidden_;
+
+ /* these are the output neurons
+ */
+ OutNeuronType *out_neurons_;
+
+ /* these are the hidden neurons
+ */
+ HiddNeuronType *hidden_neurons_;
+
+ /* function pointer to the activation
+ * function of the output neurons
+ */
+ float (*output_act_f)(float value);
+
+ /* function pointer to the activation
+ * function of the hidden neurons
+ */
+ float (*hidden_act_f)(float value);
+
+ /* function pointer to the derivation of the
+ * activation function of the hidden neurons
+ */
+ float (*hidden_act_f_d)(float value);
+
+
+ public:
+
+ /* Constructor
+ */
+ NNet(int input_val=1, int hidden_val=1, int output_val=1, int memory_out=0,
+ int memory_hidden=1, int HIDDEN_ACT_FUNC=0, int OUT_ACT_FUNC=0);
+
+ /* Destructor
+ */
+ virtual ~NNet();
+
+
+ //-----------------------------------------------------
+
+ /* Set/Get learning rate
+ */
+ virtual void setLearningRate(float learn_rate);
+ virtual float getLearningRate() const;
+
+ /* Set/Get range
+ * (see Neuron.h)
+ */
+ virtual void setRange(float range);
+ virtual float getRange() const;
+
+  /* some more get/set methods
+   * ATTENTION: the setters recreate the network,
+   * so the current weights are lost
+   */
+  virtual void setOutputVal(int output_val)
+    throw(NNExcept);
+  virtual int getOutputVal() const;
+
+  virtual void setHiddenVal(int hidden_val)
+    throw(NNExcept);
+  virtual int getHiddenVal() const;
+
+  virtual void setInputVal(int input_val)
+    throw(NNExcept);
+  virtual int getInputVal() const;
+
+  virtual void setMemoryOut(int memory)
+    throw(NNExcept);
+  virtual int getMemoryOut() const;
+
+  virtual void setMemoryHidden(int memory)
+    throw(NNExcept);
+  virtual int getMemoryHidden() const;
+
+
+ //-----------------------------------------------------
+
+ /* creates the network
+ */
+ virtual void create()
+ throw(NNExcept);
+
+ /* inits the weight matrix and the bias vector of
+ * the network with random values between [min|max]
+ */
+ virtual void initRand(const int &min, const int &max)
+ throw(NNExcept);
+
+  /* calculates the output with the current net and writes
+   * it to the array output_data
+   * ATTENTION: array input_data must be a matrix of the form
+   * float[output_val_][input_val_],
+   * array output_data must be of size output_val_
+   * (there is no checking !!!)
+   */
+  virtual void calculate(float **input_data, float *output_data);
+
+  /* this method trains the network:
+   * input_data is, as above, the input data, output_data is the
+   * output of the current net with input_data, target_output is
+   * the desired output data
+   * (this is a truncated backpropagation through time (BPTT)
+   * algorithm to train the network)
+   * ATTENTION: array input_data must be a matrix of the form
+   * float[output_val_][input_val_],
+   * array output_data must be of size output_val_,
+   * array target_output must be of size output_val_
+   * (there is no checking !!!)
+   */
+  virtual void trainBTT(float **input_data, float *output_data,
+                        float *target_output);
+
+
+ //-----------------------------------------------------
+
+ /* saves the contents of the current net to file
+ */
+ virtual void save(string filename)
+ throw(NNExcept);
+
+ /* loads the parameters of the net from file
+ */
+ virtual void load(string filename)
+ throw(NNExcept);
+
+
+ //-----------------------------------------------------
+ private:
+
+ /* output of the hidden layer with activation function
+ */
+ float *hidden_a_;
+
+ /* output of the hidden layer without activation function
+ */
+ float *hidden_s_;
+
+ /* error signal of the neurons in the hidden layer
+ */
+ float *hidden_error_;
+
+ /* out signal without activation function
+ */
+ float out_s_;
+
+ /* error signal of the output layer
+ */
+ float out_error_;
+
+ /* Copy Construction is not allowed
+ */
+ NNet(const NNet<HiddNeuronType,OutNeuronType> &src)
+ { }
+
+  /* assignment operator is not allowed
+ */
+ const NNet<HiddNeuronType,OutNeuronType>& operator=
+ (const NNet<HiddNeuronType,OutNeuronType>& src)
+ { return *this; }
+};
+
+
+//--------------------------------------------------
+/* Constructor
+ */
+template <class HiddNeuronType, class OutNeuronType>
+NNet<HiddNeuronType,OutNeuronType>
+ ::NNet(int input_val, int hidden_val, int output_val, int memory_out,
+ int memory_hidden, int HIDDEN_ACT_FUNC, int OUT_ACT_FUNC)
+ : out_neurons_(NULL), hidden_neurons_(NULL), hidden_a_(NULL),
+ hidden_s_(NULL), hidden_error_(NULL)
+{
+ output_val_ = (output_val<1) ? 1 : output_val;
+ hidden_val_ = (hidden_val<0) ? 0 : hidden_val;
+ input_val_ = (input_val<1) ? 1 : input_val;
+ memory_out_ = (memory_out<0) ? 0 : memory_out;
+ memory_hidden_ = (memory_hidden<0) ? 0 : memory_hidden;
+
+ // choose hidden activation function:
+ switch(HIDDEN_ACT_FUNC)
+ {
+ case SIGMOID:
+ hidden_act_f = act_sigmoid;
+ hidden_act_f_d = act_sigmoid_derive;
+ break;
+ case TANH:
+ hidden_act_f = act_tanh;
+ hidden_act_f_d = act_tanh_derive;
+ break;
+ default:
+ case LINEAR:
+ hidden_act_f = act_linear;
+ hidden_act_f_d = act_linear_derive;
+ break;
+ }
+
+ // choose out function:
+ switch(OUT_ACT_FUNC)
+ {
+ case SIGMOID:
+ output_act_f = act_sigmoid;
+ break;
+ case TANH:
+ output_act_f = act_tanh;
+ break;
+ default:
+ case LINEAR:
+ output_act_f = act_linear;
+ break;
+ }
+}
+
+//--------------------------------------------------
+/* Destructor
+ */
+template <class HiddNeuronType, class OutNeuronType>
+NNet<HiddNeuronType, OutNeuronType>::~NNet()
+{
+ if(hidden_neurons_)
+ delete[] hidden_neurons_;
+
+ if(out_neurons_)
+ delete[] out_neurons_;
+
+ if(hidden_a_)
+ delete[] hidden_a_;
+
+ if(hidden_s_)
+ delete[] hidden_s_;
+
+ if(hidden_error_)
+ delete[] hidden_error_;
+}
+
+//--------------------------------------------------
+/* creates the network
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::create()
+ throw(NNExcept)
+{
+ // delete if they exist
+ if(out_neurons_)
+ delete[] out_neurons_;
+ if(hidden_neurons_)
+ delete[] hidden_neurons_;
+ if(hidden_a_)
+ delete[] hidden_a_;
+ if(hidden_s_)
+ delete[] hidden_s_;
+ if(hidden_error_)
+ delete[] hidden_error_;
+
+
+  // (array new cannot take constructor arguments in standard
+  //  C++, so the neurons are configured after allocation)
+  out_neurons_ = new OutNeuronType[output_val_];
+  hidden_neurons_ = new HiddNeuronType[hidden_val_*output_val_];
+
+ if(!out_neurons_ || !hidden_neurons_)
+ throw NNExcept("No memory for Neurons!");
+
+ // create the temporary storage
+ hidden_a_ = new float[hidden_val_];
+ hidden_s_ = new float[hidden_val_];
+ hidden_error_ = new float[hidden_val_];
+
+ if(!hidden_a_ || !hidden_s_ || !hidden_error_)
+ throw NNExcept("No memory for Neurons!");
+
+
+  // configure and create all the neurons
+  // (the output neurons get the activations of the hidden
+  //  layer as input, so they need hidden_val_ inputs)
+  for(int i=0; i<output_val_; i++)
+  {
+    out_neurons_[i].setInputs(hidden_val_);
+    out_neurons_[i].setMemory(memory_out_);
+    out_neurons_[i].create();
+  }
+  for(int i=0; i<hidden_val_*output_val_; i++)
+  {
+    hidden_neurons_[i].setInputs(input_val_);
+    hidden_neurons_[i].setMemory(memory_hidden_);
+    hidden_neurons_[i].create();
+  }
+}
+
+//--------------------------------------------------
+/* inits the weight matrix and the bias vector of
+ * the network with random values between [min|max]
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::initRand(const int &min, const int &max)
+ throw(NNExcept)
+{
+ if(!out_neurons_)
+ throw NNExcept("You must first create the Net!");
+
+ // init all the neurons
+ for(int i=0; i<output_val_; i++)
+ out_neurons_[i].initRand(min,max);
+ for(int i=0; i<hidden_val_*output_val_; i++)
+ hidden_neurons_[i].initRand(min,max);
+}
+
+//--------------------------------------------------
+/* calculates the output with the current net and writes
+ * it to the array output_data
+ * ATTENTION: array input_data must be a matrix of the form
+ * float[output_val_][input_val_],
+ * array output_data must be of size output_val_
+ * (there is no checking !!!)
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::calculate(float **input_data, float *output_data)
+{
+ for(int i=0; i<output_val_; i++)
+ {
+
+ // 1.: calculation of the hidden layer
+ for(int j=0; j<hidden_val_; j++)
+ {
+ hidden_a_[j] = hidden_act_f(
+ hidden_neurons_[i*hidden_val_+j].calculate(input_data[i]) );
+ }
+
+ // 2.: calculation of the output layer
+ *output_data++ = output_act_f( out_neurons_[i].calculate(hidden_a_) );
+ }
+}
+
+//--------------------------------------------------
+/* this method trains the network:
+ * input_data is, as above, the input data, output_data is the
+ * output of the current net with input_data, target_output is
+ * the desired output data
+ * (this is a truncated backpropagation through time (BPTT)
+ * algorithm to train the network)
+ * ATTENTION: array input_data must be a matrix of the form
+ * float[output_val_][input_val_],
+ * array output_data must be of size output_val_,
+ * array target_output must be of size output_val_
+ * (there is no checking !!!)
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::trainBTT(float **input_data, float *output_data,
+ float *target_output)
+{
+  //post("train");  // (debug output, disabled)
+
+ for(int i=0; i<output_val_; i++)
+ {
+
+
+ //---------------------------------------------------------
+ // 1. Forward - Pass:
+ //
+ // the output of the hidden and the output-layer
+ // are calculated and saved (before and after
+ // the activation function)
+
+ // calculation of the hidden layer
+ for(int j=0; j<hidden_val_; j++)
+ {
+ hidden_s_[j] = hidden_neurons_[i*hidden_val_+j].calculate(input_data[i]);
+ hidden_a_[j] = hidden_act_f(hidden_s_[j]);
+ }
+
+ // calculation of the output layer
+ out_s_ = out_neurons_[i].calculate(hidden_a_);
+ output_data[i] = output_act_f(out_s_);
+
+
+ //---------------------------------------------------------
+ // 2. Backward - Pass:
+ //
+ // calculation of the error signals
+ // (they are also stored)
+
+ // output layer
+ out_error_ = output_data[i] - target_output[i];
+
+      // hidden layer:
+      // (the derivative functions expect the activated
+      //  output hidden_a_, see NNActivation.h)
+      for(int j=0; j<hidden_val_; j++)
+      {
+        hidden_error_[j] = hidden_act_f_d( hidden_a_[j] ) *
+          ( out_error_ * out_neurons_[i].getIW(j) );
+      }
+
+
+ //---------------------------------------------------------
+ // 3. Modification of the weights:
+
+ for(int j=0; j<hidden_val_; j++)
+ {
+ // output layer:
+ out_neurons_[i].setIW(j,
+ out_neurons_[i].getIW(j) -
+ getLearningRate() * out_error_
+ * hidden_a_[j] );
+
+ // hidden layer:
+ for(int k=0; k<input_val_; k++)
+ {
+ hidden_neurons_[i*hidden_val_+j].setIW(k,
+ hidden_neurons_[i*hidden_val_+j].getIW(k) -
+ getLearningRate() * hidden_error_[j]
+ * input_data[i][k]/hidden_neurons_[0].getRange() );
+ }
+
+
+ // recurrent part of the hidden layer:
+ float delta = getLearningRate() * hidden_error_[j] * hidden_a_[j];
+ for(int k=0; k<memory_hidden_; k++)
+ {
+ hidden_neurons_[i*hidden_val_+j].setLW(k,
+ hidden_neurons_[i*hidden_val_+j].getLW(k) - delta);
+ }
+ }
+
+ // recurrent part of the output layer:
+ float delta = getLearningRate() * out_error_ * output_data[i];
+ for(int j=0; j<memory_out_; j++)
+ {
+ out_neurons_[i].setLW(j,
+ out_neurons_[i].getLW(j) - delta);
+ }
+
+
+ }
+}
+
+//--------------------------------------------------
+/* saves the contents of the current net to file
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::save(string filename)
+ throw(NNExcept)
+{
+  // not implemented yet
+}
+
+//--------------------------------------------------
+/* loads the parameters of the net from file
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::load(string filename)
+ throw(NNExcept)
+{
+  // not implemented yet
+}
+
+//-----------------------------------------------------
+/* Set/Get learning rate
+ * (see Neuron.h)
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::setLearningRate(float learn_rate)
+{
+ learn_rate = (learn_rate<0) ? 0 : learn_rate;
+
+ for(int i=0; i<output_val_; i++)
+ out_neurons_[i].setLearningRate(learn_rate);
+ for(int i=0; i<hidden_val_*output_val_; i++)
+ hidden_neurons_[i].setLearningRate(learn_rate);
+}
+template <class HiddNeuronType, class OutNeuronType>
+float NNet<HiddNeuronType, OutNeuronType>::getLearningRate() const
+{
+ return out_neurons_[0].getLearningRate();
+}
+
+//-----------------------------------------------------
+/* Set/Get range
+ * (see Neuron.h)
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::setRange(float range)
+{
+  // the output neurons get the activations of the
+  // hidden layer as input, so their range stays 1
+  for(int i=0; i<output_val_; i++)
+    out_neurons_[i].setRange(1);
+
+  for(int i=0; i<hidden_val_*output_val_; i++)
+    hidden_neurons_[i].setRange(range);
+}
+template <class HiddNeuronType, class OutNeuronType>
+float NNet<HiddNeuronType, OutNeuronType>::getRange() const
+{
+ return hidden_neurons_[0].getRange();
+}
+
+//-----------------------------------------------------
+/* get/set output_val_
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::setOutputVal(int output_val)
+  throw(NNExcept)
+{
+ output_val_ = (output_val<1) ? 1 : output_val;
+
+ create();
+}
+template <class HiddNeuronType, class OutNeuronType>
+int NNet<HiddNeuronType,OutNeuronType>::getOutputVal() const
+{
+ return output_val_;
+}
+
+//-----------------------------------------------------
+/* get/set hidden_val_
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::setHiddenVal(int hidden_val)
+  throw(NNExcept)
+{
+ hidden_val_ = (hidden_val<1) ? 1 : hidden_val;
+
+ create();
+}
+template <class HiddNeuronType, class OutNeuronType>
+int NNet<HiddNeuronType,OutNeuronType>::getHiddenVal() const
+{
+ return hidden_val_;
+}
+
+//-----------------------------------------------------
+/* get/set input_val_
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::setInputVal(int input_val)
+  throw(NNExcept)
+{
+ input_val_ = (input_val<1) ? 1 : input_val;
+
+ create();
+}
+template <class HiddNeuronType, class OutNeuronType>
+int NNet<HiddNeuronType,OutNeuronType>::getInputVal() const
+{
+ return input_val_;
+}
+
+//-----------------------------------------------------
+/* get/set memory of the output layer
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::setMemoryOut(int memory)
+  throw(NNExcept)
+{
+ memory_out_ = (memory<0) ? 0 : memory;
+
+ create();
+}
+template <class HiddNeuronType, class OutNeuronType>
+int NNet<HiddNeuronType,OutNeuronType>::getMemoryOut() const
+{
+ return memory_out_;
+}
+
+//-----------------------------------------------------
+/* get/set memory of the hidden layer
+ */
+template <class HiddNeuronType, class OutNeuronType>
+void NNet<HiddNeuronType,OutNeuronType>::setMemoryHidden(int memory)
+  throw(NNExcept)
+{
+ memory_hidden_ = (memory<0) ? 0 : memory;
+
+ create();
+}
+template <class HiddNeuronType, class OutNeuronType>
+int NNet<HiddNeuronType,OutNeuronType>::getMemoryHidden() const
+{
+ return memory_hidden_;
+}
+
+
+} // end of namespace
+
+#endif //_INCLUDE_NEURAL_TEMPLATE_NET__
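
To make the control flow above concrete, here is a minimal sketch of how
the template is meant to be driven, hypothetical code, not part of this
commit (pix_recNN.cpp presumably does something similar; all parameter
values here are made up). It has to be compiled against the Pd headers,
since Neuron.h pulls in m_pd.h:

    // nnet_demo.cpp -- hypothetical, not in this commit
    #include "NNet.h"
    #include "RecurrentNeuron.h"

    using namespace TheBrain;

    void nnet_demo()
    {
      // 3 inputs (e.g. RGB) per output neuron, 4 hidden neurons per
      // output neuron, 2 output values, no output memory, hidden
      // memory of 1, tanh hidden layer, linear output layer
      NNet<RecurrentNeuron,RecurrentNeuron> net(3, 4, 2, 0, 1, TANH, LINEAR);

      net.create();               // allocates and configures all neurons
      net.initRand(-1, 1);        // weights and biases in [-1|1]
      net.setLearningRate(0.05f);
      net.setRange(255);          // the inputs are pixel values in 0..255

      float in0[3] = {200, 100, 50};
      float in1[3] = {10, 20, 30};
      float *input[2] = {in0, in1};    // float[output_val_][input_val_]
      float output[2];
      float target[2] = {0.5f, -0.5f};

      net.calculate(input, output);         // forward pass only
      net.trainBTT(input, output, target);  // one training step
    }
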
diff --git a/pix_recNN/Neuron.cpp b/pix_recNN/Neuron.cpp
new file mode 100755
index 0000000..c020c1c
--- /dev/null
+++ b/pix_recNN/Neuron.cpp
@@ -0,0 +1,169 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// class Neuron
+//
+// source file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+#include "Neuron.h"
+
+namespace TheBrain
+{
+
+//--------------------------------------------------
+/* Constructor
+ */
+Neuron::Neuron(int inputs, int dummy)
+  : IW_(NULL), b1_(0), learn_rate_(0), range_(1)
+{
+  inputs_ = (inputs<1) ? 1 : inputs;
+}
+
+//--------------------------------------------------
+/* Destructor
+ */
+Neuron::~Neuron()
+{
+ if(IW_)
+ delete[] IW_;
+}
+
+//--------------------------------------------------
+/* creates a new IW-matrix (size: inputs_)
+ * ATTENTION: if it exists it'll be deleted
+ */
+void Neuron::create()
+ throw(NNExcept)
+{
+ // delete if they exist
+ if(IW_)
+ delete[] IW_;
+
+ IW_ = new float[inputs_];
+ if(!IW_)
+ throw NNExcept("No memory for Neurons!");
+}
+
+//--------------------------------------------------
+/* inits the weight matrix and the bias vector of
+ * the network with random values between [min|max]
+ */
+void Neuron::initRand(const int &min, const int &max)
+ throw(NNExcept)
+{
+ if(!IW_)
+ throw NNExcept("You must first create the Net!");
+
+  // make a random value between 0 and 1,
+  // then map it to the bounds [min|max]
+ b1_ = ((float)rand()/(float)RAND_MAX)*(max-min) + min;
+
+ for(int i=0; i<inputs_; i++)
+ {
+ IW_[i] = ((float)rand()/(float)RAND_MAX)*(max-min) + min;
+ }
+
+ //post("b1: %f, IW: %f %f %f", b1_, IW_[0], IW_[1], IW_[2]);
+}
+
+//--------------------------------------------------
+/* inits the net with a given weight matrix and bias
+ * (makes a deep copy)
+ * ATTENTION: the dimension of the IW array must
+ * match inputs_ !!!
+ */
+void Neuron::init(const float *IW, float b1)
+ throw(NNExcept)
+{
+ if(!IW_)
+ throw NNExcept("You must first create the Net!");
+
+ b1_ = b1;
+
+ for(int i=0; i<inputs_; i++)
+ IW_[i] = IW[i];
+}
+
+//--------------------------------------------------
+/* calculates the output with the current IW, b1 values
+ * ATTENTION: the array input_data must be of the same
+ * size as inputs_
+ */
+float Neuron::calculate(float *input_data)
+{
+ float output = 0;
+
+ // multiply the inputs with the weight matrix IW
+ // and add the bias vector b1
+ for(int i=0; i<inputs_; i++)
+ {
+ output += input_data[i] * IW_[i];
+ }
+
+ // map input values to the range
+ output /= range_;
+
+ //post("b1: %f, IW: %f %f %f", b1_, IW_[0], IW_[1], IW_[2]);
+ //post("range: %f, in: %f %f %f, out: %f",range_,input_data[0],
+ // input_data[1], input_data[2], output+b1_);
+
+ return (output+b1_);
+}
+
+//--------------------------------------------------
+/* this method trains the network:
+ * input_data is, as above, the input data, output_data is the
+ * output of the current net with input_data (output_data is not
+ * calculated in that method !), target_output is the desired
+ * output data
+ * (this is the LMS-algorithm to train linear neural networks)
+// * ATTENTION: the array input_data must be of the same
+// * size as inputs_
+ * returns the calculated value
+ */
+// float Neuron::trainLMS(const float *input_data,
+// const float &target_output)
+// {
+// float output = 0;
+
+// // multiply the inputs with the weight matrix IW
+// // and add the bias vector b1
+// for(int i=0; i<inputs_; i++)
+// {
+// output += input_data[i] * IW_[i];
+// }
+
+// // map input values to the range
+// output /= range_;
+
+// output += b1_;
+
+// //------------
+
+// // this is the LMS-algorithm to train linear
+// // neural networks
+
+// // calculate the error signal:
+// float error = (target_output - output);
+
+// // now change the weights the bias
+// for(int i=0; i<inputs_; i++)
+// IW_[i] += 2 * learn_rate_ * error * (input_data[i]/range_);
+
+// b1_ += 2 * learn_rate_ * error;
+
+// //------------
+
+// return (output);
+// }
+
+} // end of namespace
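
A minimal usage sketch for a single neuron, hypothetical code, not part of
this commit (it needs the Pd headers on the include path, since Neuron.h
pulls in m_pd.h):

    // neuron_demo.cpp -- hypothetical, not in this commit
    #include "Neuron.h"

    using namespace TheBrain;

    void neuron_demo()
    {
      Neuron n(3);        // one neuron with 3 inputs
      n.create();         // allocates the weight matrix IW
      n.initRand(-1, 1);  // random weights and bias in [-1|1]
      n.setRange(255);    // the inputs are pixel values in 0..255

      float in[3] = {255, 128, 0};

      // output = IW*in / range + b1
      float y = n.calculate(in);
      (void)y;
    }
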
diff --git a/pix_recNN/Neuron.h b/pix_recNN/Neuron.h
new file mode 100755
index 0000000..f10d993
--- /dev/null
+++ b/pix_recNN/Neuron.h
@@ -0,0 +1,191 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// class Neuron
+//
+// this is an implementation of one neuron of a neural network
+// the neuron has a weight matrix IW and a bias b1
+// it can have n input values, but only one output value
+// (see NeuralNet documentation for more information)
+//
+// header file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef _INCLUDE_NEURON_NET__
+#define _INCLUDE_NEURON_NET__
+
+#include <stdlib.h>
+#include <stdexcept>
+#include "NNException.h"
+#include "m_pd.h" //debug
+
+namespace TheBrain
+{
+
+//------------------------------------------------------
+/* class of one neuron
+ */
+class Neuron
+{
+ protected:
+
+  /* the number of input values, which is
+   * automatically also the size of IW
+   */
+ int inputs_;
+
+ /* the input weight matrix IW
+ * (size: inputs )
+ */
+ float *IW_;
+
+ /* the bias vector b1
+ */
+ float b1_;
+
+ /* the learning rate of the net
+ */
+ float learn_rate_;
+
+  /* the input values should be in the range 0
+   * to range_,
+   * output values are from -1 to 1
+   */
+ float range_;
+
+
+ public:
+
+ /* Constructor
+ */
+  Neuron(int inputs=1, int dummy=0);
+
+ /* Destructor
+ */
+ virtual ~Neuron();
+
+
+ //-----------------------------------------------------
+
+ /* Set/Get learning rate
+ */
+ virtual void setLearningRate(float learn_rate)
+ { learn_rate_=learn_rate; }
+ virtual float getLearningRate() const
+ { return learn_rate_; }
+
+ /* Set/Get range
+ */
+ virtual void setRange(float range)
+ { range_=range; }
+ virtual float getRange() const
+ { return range_; }
+
+ /* some more get/set methods
+ */
+
+  virtual int getInputs() const
+    { return inputs_; }
+
+  /* sets the number of inputs
+   * (takes effect after the next create())
+   */
+  virtual void setInputs(int inputs)
+    { inputs_ = (inputs<1) ? 1 : inputs; }
+
+ virtual float *getIW() const
+ { return IW_; }
+ virtual float getIW(int index) const
+ { return IW_[index]; }
+
+ virtual void setIW(const float *IW)
+ { for(int i=0; i<inputs_; i++) IW_[i] = IW[i]; }
+ virtual void setIW(int index, float value)
+ { IW_[index] = value; }
+
+ virtual float getb1() const
+ { return b1_; }
+ virtual void setb1(float b1)
+ { b1_ = b1; }
+
+
+ /* dummies
+ */
+
+  virtual int getMemory() const
+    { return 0; }
+
+  virtual void setMemory(int memory)
+    { }
+
+ virtual float *getLW() const
+ { return NULL; }
+ virtual float getLW(int index) const
+ { return 0; }
+
+ virtual void setLW(const float *LW)
+ { }
+ virtual void setLW(int index, float value)
+ { }
+
+
+ //-----------------------------------------------------
+
+  /* creates a new IW-matrix (size: inputs_)
+   * ATTENTION: if it exists it'll be deleted
+   */
+ virtual void create()
+ throw(NNExcept);
+
+ /* inits the weight matrix and the bias vector of
+ * the network with random values between [min|max]
+ */
+ virtual void initRand(const int &min, const int &max)
+ throw(NNExcept);
+
+  /* inits the net with a given weight matrix and bias
+   * (makes a deep copy)
+   * ATTENTION: the dimension of the IW array must
+   * match inputs_ !!!
+   */
+ virtual void init(const float *IW, float b1)
+ throw(NNExcept);
+
+  /* calculates the output with the current IW, b1 values
+   * ATTENTION: the array input_data must be of the same
+   * size as inputs_
+   */
+ virtual float calculate(float *input_data);
+
+ /* this method trains the network:
+ * input_data is, as above, the input data, output_data is the
+ * output of the current net with input_data (output_data is not
+ * calculated in that method !), target_output is the desired
+ * output data
+ * (this is the LMS-algorithm to train linear neural networks)
+   * ATTENTION: the array input_data must be of the same
+   * size as inputs_
+ * returns the calculated value
+ */
+/* virtual float trainLMS(const float *input_data, */
+/* const float &target_output); */
+
+
+ //-----------------------------------------------------
+ private:
+
+ /* Copy Construction is not allowed
+ */
+ Neuron(const Neuron &src)
+ { }
+
+  /* assignment operator is not allowed
+ */
+ const Neuron& operator= (const Neuron& src)
+ { return *this; }
+};
+
+
+} // end of namespace
+
+#endif //_INCLUDE_NEURON_NET__
diff --git a/pix_recNN/RecurrentNeuron.cpp b/pix_recNN/RecurrentNeuron.cpp
new file mode 100755
index 0000000..1b322c1
--- /dev/null
+++ b/pix_recNN/RecurrentNeuron.cpp
@@ -0,0 +1,226 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// class RecurrentNeuron
+//
+// source file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+#include "RecurrentNeuron.h"
+
+namespace TheBrain
+{
+
+//--------------------------------------------------
+/* Constructor
+ */
+RecurrentNeuron::RecurrentNeuron(int inputs, int memory)
+ : Neuron(inputs), LW_(NULL), mem_data_(NULL)
+{
+ memory_ = (memory<0) ? 1 : memory+1;
+}
+
+//--------------------------------------------------
+/* Destructor
+ */
+RecurrentNeuron::~RecurrentNeuron()
+{
+ if(LW_)
+ delete[] LW_;
+
+ if(mem_data_)
+ delete[] mem_data_;
+}
+
+//--------------------------------------------------
+/* creates the IW- and LW-matrices (sizes: inputs_,
+ * memory_) and the memory buffer
+ * ATTENTION: if they exist they'll be deleted
+ */
+void RecurrentNeuron::create()
+ throw(NNExcept)
+{
+ // delete if they exist
+ if(IW_)
+ delete[] IW_;
+ if(LW_)
+ delete[] LW_;
+ if(mem_data_)
+ delete[] mem_data_;
+
+ IW_ = new float[inputs_];
+ LW_ = new float[memory_];
+ mem_data_ = new float[memory_];
+
+ if(!IW_ || !LW_ || !mem_data_)
+ throw NNExcept("No memory for Neurons!");
+
+  // clear the memory so the first calculate()
+  // calls don't read uninitialized values
+  for(int i=0; i<memory_; i++)
+    mem_data_[i] = 0;
+
+  index_=0;
+}
+
+//--------------------------------------------------
+/* inits the weight matrix and the bias vector of
+ * the network with random values between [min|max]
+ */
+void RecurrentNeuron::initRand(const int &min, const int &max)
+ throw(NNExcept)
+{
+ if(!IW_ || !LW_)
+ throw NNExcept("You must first create the Net!");
+
+  // make a random value between 0 and 1,
+  // then map it to the bounds [min|max]
+ b1_ = ((float)rand()/(float)RAND_MAX)*(max-min) + min;
+
+ for(int i=0; i<inputs_; i++)
+ {
+ IW_[i] = ((float)rand()/(float)RAND_MAX)*(max-min) + min;
+ }
+  for(int i=0; i<memory_; i++)
+  {
+    // (the recurrent weights are intentionally initialized
+    //  only between 0 and min, not in [min|max])
+    //LW_[i] = ((float)rand()/(float)RAND_MAX)*(max-min) + min;
+    LW_[i] = ((float)rand()/(float)RAND_MAX)*(min);
+  }
+}
+
+//--------------------------------------------------
+/* inits the net with given weight matrices and bias
+ * (makes a deep copy)
+ * ATTENTION: the dimension of the IW array must match
+ * inputs_, the dimension of LW must match memory_ !!!
+ */
+void RecurrentNeuron::init(const float *IW, const float *LW, float b1)
+ throw(NNExcept)
+{
+ if(!IW_ || !LW_)
+ throw NNExcept("You must first create the Net!");
+
+ b1_ = b1;
+
+ for(int i=0; i<inputs_; i++)
+ IW_[i] = IW[i];
+ for(int i=0; i<memory_; i++)
+ LW_[i] = LW[i];
+}
+
+//--------------------------------------------------
+/* calculates the output with the current IW, b1 values
+ * ATTENTION: the array input_data must be of the same
+ * size as inputs_
+ */
+float RecurrentNeuron::calculate(float *input_data)
+{
+ float output = 0;
+
+ // multiply the inputs with the weight matrix IW
+ for(int i=0; i<inputs_; i++)
+ {
+ output += input_data[i] * IW_[i];
+ }
+
+ // map input values to the range
+ output /= range_;
+
+  // multiply the memory with the weight matrix LW
+  // (the index walks through the memory like a
+  //  simple ring buffer)
+ for(int i=0; i<memory_; i++)
+ {
+ output += mem_data_[index_] * LW_[i];
+ index_ = (index_+1) % memory_;
+ }
+
+ // now add bias
+ output += b1_;
+
+ // finally save the new output in memory
+ mem_data_[index_] = output;
+ index_ = (index_+1) % memory_;
+
+ //post("input: %f %f %f, IW: %f %f %f, b: %f",
+ // input_data[0], input_data[1], input_data[2],
+ // IW_[0], IW_[1], IW_[2], b1_);
+ //post("output: %f",output);
+
+ return (output);
+}
+
+//--------------------------------------------------
+/* this method trains the network:
+ * input_data is, as above, the input data, output_data is the
+ * output of the current net with input_data (output_data is not
+ * calculated in that method !), target_output is the desired
+ * output data
+ * (this is the LMS-algorithm to train linear neural networks)
+ * ATTENTION: the array input_data must be of the same
+ * size as inputs_
+ * returns the calculated output
+ */
+// float RecurrentNeuron::trainLMS(const float *input_data,
+// const float &target_output)
+// {
+// // calculate output value:
+
+// float output = 0;
+
+// // multiply the inputs with the weight matrix IW
+// for(int i=0; i<inputs_; i++)
+// {
+// output += input_data[i] * IW_[i];
+// }
+
+// // map input values to the range
+// output /= range_;
+
+// // multiply memory with weight matrix LW
+// // the index is used to make something
+// // like a simple list or ringbuffer
+// for(int i=0; i<memory_; i++)
+// {
+// output += mem_data_[index_] * LW_[i];
+// index_ = (index_+1) % memory_;
+// }
+
+// // now add bias
+// output += b1_;
+
+// //----------------
+
+// // this is the LMS-algorithm to train linear
+// // neural networks
+
+// // calculate the error signal:
+// float error = (target_output - output);
+
+// // now change IW
+// for(int i=0; i<inputs_; i++)
+// IW_[i] += 2 * learn_rate_ * error * (input_data[i]/range_);
+
+// // change LW
+// for(int i=0; i<memory_; i++)
+// {
+// LW_[i] += 2 * learn_rate_ * error * mem_data_[index_];
+// index_ = (index_+1) % memory_;
+// }
+
+// // and the bias
+// b1_ += 2 * learn_rate_ * error;
+
+// //-----------------
+
+// // finally save the new output in memory
+// mem_data_[index_] = output;
+// index_ = (index_+1) % memory_;
+
+// return (output);
+// }
+
+
+} // end of namespace
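
The ring buffer memory is what distinguishes this class from the plain
Neuron: each calculated output is written back into mem_data_ and enters
the next calculation through the weights LW. A minimal sketch, hypothetical
code, not part of this commit:

    // recurrent_demo.cpp -- hypothetical, not in this commit
    #include "RecurrentNeuron.h"

    using namespace TheBrain;

    void recurrent_demo()
    {
      RecurrentNeuron n(2, 1);  // 2 inputs, memory of 1 past output
      n.create();               // also clears the memory buffer
      n.initRand(-1, 1);

      float in[2] = {0.5f, -0.5f};

      // first call: the memory is still empty (all zeros)
      float y1 = n.calculate(in);

      // second call: the same input, but y1 is now fed back
      // through the recurrent weights LW, so in general y2 != y1
      float y2 = n.calculate(in);

      (void)y1; (void)y2;
    }
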
diff --git a/pix_recNN/RecurrentNeuron.h b/pix_recNN/RecurrentNeuron.h
new file mode 100755
index 0000000..ee87068
--- /dev/null
+++ b/pix_recNN/RecurrentNeuron.h
@@ -0,0 +1,149 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// class RecurrentNeuron
+//
+// this is an implementation of one neuron of a recurrent neural network
+// this neuron can have n input values, m values in its memory and
+// one output value
+// (see NeuralNet documentation for more information)
+//
+// header file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+//
+// This program is free software; you can redistribute it and/or
+// modify it under the terms of the GNU General Public License
+// as published by the Free Software Foundation; either version 2
+// of the License, or (at your option) any later version.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef _INCLUDE_RECURRENT_NEURON_NET__
+#define _INCLUDE_RECURRENT_NEURON_NET__
+
+#include <stdlib.h>
+#include <stdexcept>
+#include "Neuron.h"
+
+namespace TheBrain
+{
+
+//------------------------------------------------------
+/* class of one neuron
+ */
+class RecurrentNeuron : public Neuron
+{
+ protected:
+
+  /* this determines how many output values the
+   * neuron can remember
+   * these values are fed back as new input
+   */
+ int memory_;
+
+ /* the weight matrix for the recurrent
+ * values (size: memory_)
+ */
+ float *LW_;
+
+
+ public:
+
+ /* Constructor
+ */
+  RecurrentNeuron(int inputs=1, int memory=0);
+
+ /* Destructor
+ */
+ virtual ~RecurrentNeuron();
+
+
+ //-----------------------------------------------------
+ /* some more get/set methods
+ */
+
+  virtual int getMemory() const
+    { return memory_; }
+
+  /* sets the memory
+   * (takes effect after the next create())
+   */
+  virtual void setMemory(int memory)
+    { memory_ = (memory<0) ? 1 : memory+1; }
+
+ virtual float *getLW() const
+ { return LW_; }
+ virtual float getLW(int index) const
+ { return LW_[index]; }
+
+  virtual void setLW(const float *LW)
+    { for(int i=0; i<memory_; i++) LW_[i] = LW[i]; }
+ virtual void setLW(int index, float value)
+ { LW_[index] = value; }
+
+
+ //-----------------------------------------------------
+
+  /* creates the IW- and LW-matrices (sizes: inputs_,
+   * memory_) and the memory buffer
+   * ATTENTION: if they exist they'll be deleted
+   */
+ virtual void create()
+ throw(NNExcept);
+
+ /* inits the weight matrix and the bias vector of
+ * the network with random values between [min|max]
+ */
+ virtual void initRand(const int &min, const int &max)
+ throw(NNExcept);
+
+  /* inits the net with given weight matrices and bias
+   * (makes a deep copy)
+   * ATTENTION: the dimension of the IW array must match
+   * inputs_, the dimension of LW must match memory_ !!!
+   */
+ virtual void init(const float *IW, const float *LW, float b1)
+ throw(NNExcept);
+
+  /* calculates the output with the current IW, b1 values
+   * ATTENTION: the array input_data must be of the same
+   * size as inputs_
+   */
+ virtual float calculate(float *input_data);
+
+ /* this method trains the network:
+ * input_data is, as above, the input data, output_data is the
+ * output of the current net with input_data (output_data is not
+ * calculated in that method !), target_output is the desired
+ * output data
+ * (this is the LMS-algorithm to train linear neural networks)
+   * ATTENTION: the array input_data must be of the same
+   * size as inputs_
+ * returns the calculated output
+ */
+/* virtual float trainLMS(const float *input_data, */
+/* const float &target_output); */
+
+
+ //-----------------------------------------------------
+ private:
+
+ /* the storage for the memory data
+ */
+ float *mem_data_;
+
+  /* this index walks through the memory
+   * like a simple ring buffer
+   */
+ int index_;
+
+ /* Copy Construction is not allowed
+ */
+ RecurrentNeuron(const RecurrentNeuron &src) : Neuron(1)
+ { }
+
+  /* assignment operator is not allowed
+ */
+ const RecurrentNeuron& operator= (const RecurrentNeuron& src)
+ { return *this; }
+};
+
+
+} // end of namespace
+
+#endif //_INCLUDE_RECURRENT_NEURON_NET__
diff --git a/pix_recNN/gpl.txt b/pix_recNN/gpl.txt
new file mode 100755
index 0000000..5ea29a7
--- /dev/null
+++ b/pix_recNN/gpl.txt
@@ -0,0 +1,346 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) 19yy <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) 19yy name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
+
diff --git a/pix_recNN/help-pix_recNN.pd b/pix_recNN/help-pix_recNN.pd
new file mode 100755
index 0000000..4236941
--- /dev/null
+++ b/pix_recNN/help-pix_recNN.pd
@@ -0,0 +1,146 @@
+#N canvas 871 74 498 783 10;
+#X obj 36 327 gemwin;
+#X msg 36 301 create \, 1;
+#N canvas 75 72 765 790 pix2sig_stuff~ 0;
+#X obj 120 35 gemhead;
+#X obj 120 132 pix_texture;
+#X obj 119 274 outlet~;
+#X obj 139 185 square 4;
+#X obj 139 163 separator;
+#X obj 61 165 separator;
+#X obj 120 101 pix_video;
+#X msg 186 64 dimen 640 480;
+#X obj 26 36 block~ 2048;
+#X msg 186 38 dimen 320 240;
+#X msg 76 535 getprecision;
+#X msg 93 696 getlearnrate;
+#X msg 65 671 learnrate 0.2;
+#X msg 424 459 getneurons;
+#X msg 404 206 train;
+#X obj 31 227 inlet~;
+#X msg 65 647 learnrate 0.05;
+#X msg 381 708 getmemory;
+#X msg 361 639 memory 0;
+#X msg 361 660 memory 1;
+#X obj 61 252 pix_recNN;
+#X text 296 49 <- input dimension;
+#X obj 78 226 r \$0-recNN;
+#X obj 62 564 s \$0-recNN;
+#X msg 76 498 precision \$1;
+#X floatatom 76 481 5 0 0 0 - - -;
+#X text 42 335 precision:;
+#X text 53 358 1: means every pixel is used in the calculation;
+#X text 53 372 2: only every second pixel;
+#X text 53 386 ...;
+#X obj 62 411 loadbang;
+#X msg 407 401 neurons 2048;
+#X msg 407 422 neurons 64;
+#X obj 407 492 s \$0-recNN;
+#X text 403 336 neurons:;
+#X text 416 357 nr. of neurons used in the calculation;
+#X text 415 370 (_MUST_ be the same as the buffersize !!!);
+#X text 43 615 learnrate:;
+#X obj 65 725 s \$0-recNN;
+#X msg 361 681 memory 3;
+#X obj 361 741 s \$0-recNN;
+#X text 343 543 memory:;
+#X text 356 565 this determines how many values from the past the
+recurrent net considers in the calculation;
+#X text 357 604 (be careful with large values !!!);
+#X msg 62 456 precision 1;
+#X msg 62 436 precision 4;
+#X obj 404 233 s \$0-recNN;
+#X text 397 126 train:;
+#X text 417 152 trains the neural net;
+#X text 418 166 (the current video frame to;
+#X text 425 178 the current audio block);
+#X connect 0 0 6 0;
+#X connect 1 0 4 0;
+#X connect 1 0 5 0;
+#X connect 4 0 3 0;
+#X connect 5 0 20 0;
+#X connect 6 0 1 0;
+#X connect 7 0 6 0;
+#X connect 9 0 6 0;
+#X connect 10 0 23 0;
+#X connect 11 0 38 0;
+#X connect 12 0 38 0;
+#X connect 13 0 33 0;
+#X connect 14 0 46 0;
+#X connect 15 0 20 0;
+#X connect 16 0 38 0;
+#X connect 17 0 40 0;
+#X connect 18 0 40 0;
+#X connect 19 0 40 0;
+#X connect 20 1 2 0;
+#X connect 22 0 20 0;
+#X connect 24 0 23 0;
+#X connect 25 0 24 0;
+#X connect 30 0 45 0;
+#X connect 31 0 33 0;
+#X connect 32 0 33 0;
+#X connect 39 0 40 0;
+#X connect 44 0 23 0;
+#X connect 45 0 23 0;
+#X restore 89 542 pd pix2sig_stuff~;
+#X msg 110 302 0 \, destroy;
+#X obj 116 587 unsig~;
+#X obj 206 432 osc~ 440;
+#X obj 205 456 *~;
+#X obj 237 456 tgl 15 0 empty empty empty 0 -6 0 8 -262144 -1 -1 0
+1;
+#X obj 207 496 sig~ 0;
+#X floatatom 117 608 8 0 0 0 - - -;
+#X text 25 23 pix_recNN:;
+#X text 24 57 pix_recNN is an instrument/interface. It is meant as a
+general experimental video interface to generate audio. You can train
+the neural net by playing audio samples to specific video frames in
+real-time. The main interest for me was not to train the net to reproduce
+these samples exactly \, but to make experimental sounds \, which are
+"between" all the trained samples.;
+#X text 22 214 (but this version is unfinished - e.g. the training
+algorithm must be tuned etc. - so it's only a very basic prototype...)
+;
+#X text 207 320 <- create gemwin;
+#X obj 41 442 readsf~;
+#X obj 41 401 openpanel;
+#X msg 41 421 open \$1;
+#X obj 41 380 bng 15 250 50 0 empty empty empty 0 -6 0 8 -262144 -1
+-1;
+#X text 67 379 <- load sample for training;
+#X obj 122 417 tgl 25 0 empty empty empty 0 -6 0 8 -195568 -1 -1 0
+1;
+#X floatatom 206 414 5 0 0 0 - - -;
+#X text 272 431 <- simple osc for training;
+#X text 262 497 <- to train silence;
+#X obj 85 463 bng 15 250 50 0 empty empty empty 0 -6 0 8 -262144 -1
+-1;
+#X text 216 541 <- audio/video work;
+#X obj 90 684 dac~;
+#X obj 90 659 *~;
+#X obj 118 659 dbtorms;
+#X floatatom 118 641 5 0 0 0 - - -;
+#X text 168 638 <- outvol in dB;
+#X text 22 170 pix_recNN uses a 2 layer recurrent neural net (for more
+detailed info look at the source code.);
+#X text 119 737 Georg Holzmann <grh@mur.at> \, 2004;
+#X connect 1 0 0 0;
+#X connect 2 0 4 0;
+#X connect 2 0 26 0;
+#X connect 3 0 0 0;
+#X connect 4 0 9 0;
+#X connect 5 0 6 0;
+#X connect 6 0 2 0;
+#X connect 7 0 6 1;
+#X connect 8 0 2 0;
+#X connect 14 0 2 0;
+#X connect 14 1 23 0;
+#X connect 15 0 16 0;
+#X connect 16 0 14 0;
+#X connect 17 0 15 0;
+#X connect 19 0 14 0;
+#X connect 20 0 5 0;
+#X connect 26 0 25 0;
+#X connect 26 0 25 1;
+#X connect 27 0 26 1;
+#X connect 28 0 27 0;
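The precision message documented in the help patch maps directly to a
sampling stride: only every precision-th pixel of a slice enters the average
that feeds the net. A minimal standalone sketch of that idea (the sliceMean
helper and its signature are invented for illustration; the real code does
this inline in perform(), see pix_recNN.cpp below):

    // mean of one colour channel over a w x h slice of an RGBA frame,
    // visiting only every precision-th pixel in x and y
    // (data/xsize/csize follow GEM's imageStruct layout)
    float sliceMean(const unsigned char *data, int xsize, int csize,
                    int x0, int y0, int w, int h, int channel, int precision)
    {
      unsigned long sum = 0;
      int count = 0;
      for(int j=0; j<h; j+=precision)
        for(int i=0; i<w; i+=precision)
        {
          sum += data[(y0+j)*xsize*csize + (x0+i)*csize + channel];
          count++;
        }
      return count ? float(sum)/count : 0.f;
    }

With precision 1 every pixel is visited, with precision 2 only a quarter of
them, trading image detail for CPU time, as the help patch explains.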
diff --git a/pix_recNN/pix_recNN.cpp b/pix_recNN/pix_recNN.cpp
new file mode 100755
index 0000000..299625a
--- /dev/null
+++ b/pix_recNN/pix_recNN.cpp
@@ -0,0 +1,423 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// GEM - Graphics Environment for Multimedia
+//
+// pix_recNN
+//
+// Implementation file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+// (and of course lots of other developers for PD and GEM)
+//
+// For information on usage and redistribution, and for a DISCLAIMER OF ALL
+// WARRANTIES, see the file, "GEM.LICENSE.TERMS" in this distribution.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+#include "pix_recNN.h"
+
+CPPEXTERN_NEW_WITH_THREE_ARGS(pix_recNN, t_floatarg, A_DEFFLOAT,
+ t_floatarg, A_DEFFLOAT, t_floatarg, A_DEFFLOAT)
+
+//----------------------------------------------------------
+/* Constructor
+ */
+ pix_recNN::pix_recNN(t_floatarg arg0=64, t_floatarg arg1=1, t_floatarg arg2=1) :
+   m_data_(NULL), m_xsize_(0), m_ysize_(0), m_csize_(0),
+   train_on_(false), net_(NULL), temp_pix_(NULL), temp_rows_(0)
+{
+  // TODO: initialize these from the creation arguments
+  // (currently hard-coded)
+  neuron_nr_=2048; //static_cast<int>((arg0<0)?2:arg0);
+  memory_=0;
+  precision_=2; //static_cast<int>((arg2<1)?1:arg2);
+  //post("arg0: %d, arg1: %d",arg0,arg1);
+
+ // generate the in- and outlet:
+ out0_ = outlet_new(this->x_obj, &s_signal);
+ inlet_new(this->x_obj, &this->x_obj->ob_pd, &s_signal, &s_signal);
+
+ // set random seed:
+ srand( (unsigned)time(NULL) );
+
+ // build the net
+ buildNewNet();
+}
+
+//----------------------------------------------------------
+/* Destructor
+ */
+pix_recNN::~pix_recNN()
+{
+ outlet_free(out0_);
+ m_data_ = NULL;
+ m_xsize_ = 0;
+ m_ysize_ = 0;
+
+ // delete net
+ delete net_;
+
+  // delete temp_pix_ (may be NULL if allocation failed)
+  if(temp_pix_)
+  {
+    for(int i=0; i<temp_rows_; i++)
+      delete[] temp_pix_[i];
+    delete[] temp_pix_;
+  }
+}
+
+//----------------------------------------------------------
+/* a helper to build a new net
+ */
+void pix_recNN::buildNewNet()
+{
+ try
+ {
+ if(net_)
+ delete net_;
+
+    if(temp_pix_)
+    {
+      // free with the size it was allocated with: neuron_nr_
+      // may already hold a new value here (see setNeurons)
+      for(int i=0; i<temp_rows_; i++)
+        delete[] temp_pix_[i];
+      delete[] temp_pix_;
+    }
+
+    // create the net (nothrow, so the NULL check below can work)
+    net_ = new(std::nothrow) NNet<RecurrentNeuron,RecurrentNeuron>(
+                                  3,3,neuron_nr_,memory_,0,TANH,LINEAR);
+ if(!net_)
+ {
+ post("pix_recNN~: no memory for neural nets!");
+ net_=NULL;
+ return;
+ }
+
+ // create the temp_pix
+    temp_pix_ = new(std::nothrow) float*[neuron_nr_];
+ if(!temp_pix_)
+ {
+ post("pix_recNN~: no memory for temp_pix_!");
+ temp_pix_=NULL;
+ return;
+ }
+    for(int i=0; i<neuron_nr_; i++)
+    {
+      temp_pix_[i] = new(std::nothrow) float[3];
+      if(!temp_pix_[i])
+      {
+        post("pix_recNN~: no memory for temp_pix_!");
+        // free the rows allocated so far before giving up
+        for(int j=0; j<i; j++)
+          delete[] temp_pix_[j];
+        delete[] temp_pix_;
+        temp_pix_=NULL;
+        return;
+      }
+    }
+    temp_rows_ = neuron_nr_;
+
+ // initialize temp_pix_ with 0
+ for(int i=0; i<neuron_nr_; i++)
+ {
+ for(int j=0; j<3; j++)
+ {
+ temp_pix_[i][j] = 0;
+ }
+ }
+
+ // init the net
+ net_->create();
+ net_->initRand(-1,1);
+ net_->setRange(255);
+ net_->setLearningRate(0.01);
+ }
+ catch(NNExcept &exc)
+ {
+ post("pix_recNN: %s", exc.what().c_str());
+ }
+}
+
+//----------------------------------------------------------
+/* processImage
+ */
+void pix_recNN::processImage(imageStruct &image)
+{
+ m_data_ = image.data;
+ m_xsize_ = image.xsize;
+ m_ysize_ = image.ysize;
+ m_csize_ = image.csize;
+ m_format_ = image.format;
+}
+
+//----------------------------------------------------------
+/* DSP perform
+ */
+t_int* pix_recNN::perform(t_int* w)
+{
+ pix_recNN *x = GetMyClass((void*)w[1]);
+ t_float* in_signal = (t_float*)(w[2]);
+ t_float* out_signal = (t_float*)(w[3]);
+ int blocksize = (t_int)(w[4]);
+
+ if(blocksize != x->neuron_nr_)
+ {
+ post("pix_recNN~: neurons and buffersize are different! You MUST have the same neuron nr as the buffersize !!!");
+ post("neurons: %d, buffersize: %d", x->neuron_nr_, blocksize);
+ return (w+5);
+ }
+
+
+ // some needed data
+ long int pix_size = x->m_xsize_ * x->m_ysize_;
+ int pix_blocksize = (blocksize<pix_size)?blocksize:pix_size;
+
+  // split the frame into slices, so that the average of
+  // one slice can be used as the network input;
+  // there are as many slices as samples in one block
+
+  float nr = sqrt(blocksize); // the number of slices along
+                              // the x- and y-axis
+
+ float x_slice = x->m_xsize_ / nr; // x size of a slice in pixels
+ float y_slice = x->m_ysize_ / nr; // x size of a slice in pixels
+ int x_slice_int = static_cast<int>( x_slice );
+ int y_slice_int = static_cast<int>( y_slice );
+
+  // the number of slices on one axis
+  // (the float nr from above, rounded up)
+  int slice_nr = static_cast<int>(nr) + 1;
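+  // (e.g. blocksize 2048: nr ~ 45.25, slice_nr = 46, so sample n
+  //  takes its input from grid cell (n % 46, n / 46))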
+
+
+ if (x->m_data_)
+ {
+ switch(x->m_format_)
+ {
+ case GL_RGBA:
+ {
+ for(int n=0; n<pix_blocksize; n++)
+ {
+ //post("Block %d:",n);
+
+          // calculate the pixel at the upper left corner of every slice
+ int lu_pix_x = static_cast<int>( (n % slice_nr) * x_slice );
+ int lu_pix_y = static_cast<int>( static_cast<int>(n / slice_nr) * y_slice );
+
+ //post("lu_pix: %d, %d", lu_pix_x, lu_pix_y);
+
+          // now sum up all the pixels of one slice and then
+          // divide by the number of pixels
+ // the storage to sum the pixels:
+ unsigned long int temp_data[3] = { 0, 0, 0 };
+
+ // only for optimization:
+ int helper1 = x->m_xsize_ * x->m_csize_;
+ int add_count = 0;
+
+ for(int i=0; i<x_slice_int; i+=x->precision_)
+ {
+ for(int j=0; j<y_slice_int; j+=x->precision_)
+ {
+ // the way to access the pixels: (C=chRed, chBlue, ...)
+ //data[Y * xsize * csize + X * csize + C]
+
+ //post("current pixel: %d %d",
+ // ((lu_pix_x+i)%x->m_xsize), ((lu_pix_y+j)%x->m_ysize) );
+
+ temp_data[0] += x->m_data_[
+ (lu_pix_y+j) * helper1
+ + (lu_pix_x+i) * x->m_csize_ + chRed ];
+
+ temp_data[1] += x->m_data_[
+ ((lu_pix_y+j)) * helper1
+ + ((lu_pix_x+i)) * x->m_csize_ + chGreen ];
+
+ temp_data[2] += x->m_data_[
+ ((lu_pix_y+j)%x->m_ysize_) * helper1
+ + ((lu_pix_x+i)%x->m_xsize_) * x->m_csize_ + chBlue ];
+
+ add_count++;
+ }
+ }
+
+ x->temp_pix_[n][0] = temp_data[0] / add_count;
+ x->temp_pix_[n][1] = temp_data[1] / add_count;
+ x->temp_pix_[n][2] = temp_data[2] / add_count;
+ }
+
+ // learning, or calculation:
+ if(!x->train_on_)
+ x->net_->calculate(x->temp_pix_, out_signal);
+ else
+ x->net_->trainBTT(x->temp_pix_, out_signal, in_signal);
+
+ }
+ break;
+
+ default:
+ post("RGB only for now");
+ }
+ }
+ else
+ {
+ pix_blocksize=blocksize;
+ while (pix_blocksize--) *out_signal++=0;
+ }
+
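+  // the train message arms training for a single DSP block only,
+  // so the flag is cleared again after every perform call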
+ x->train_on_=false;
+ return (w+5);
+}
+
+//----------------------------------------------------------
+/* DSP-Message
+ */
+void pix_recNN::dspMess(void *data, t_signal** sp)
+{
+ dsp_add(perform, 4, data, sp[0]->s_vec, sp[1]->s_vec, sp[0]->s_n);
+}
+
+//----------------------------------------------------------
+/* saves the contents of the current net to file
+ */
+void pix_recNN::saveNet(string filename)
+{
+ try
+ {
+ net_->save(filename);
+ post("pix_recNN~: saved to output-file %s", filename.c_str());
+ }
+ catch(NNExcept &exc)
+ {
+ post("pix_recNN: %s", exc.what().c_str());
+ }
+}
+
+//----------------------------------------------------------
+/* loads the parameters of the net from file
+ */
+void pix_recNN::loadNet(string filename)
+{
+ try
+ {
+ net_->load(filename);
+ post("pix_recNN~: loaded file %s", filename.c_str());
+ }
+ catch(NNExcept &exc)
+ {
+ post("pix_recNN: %s", exc.what().c_str());
+ }
+}
+
+//----------------------------------------------------------
+/* setup callback
+ */
+void pix_recNN::obj_setupCallback(t_class *classPtr)
+{
+ class_addcreator((t_newmethod)_classpix_recNN, gensym("pix_recNN~"), A_NULL);
+
+ class_addmethod(classPtr, (t_method)pix_recNN::setNeurons,
+ gensym("neurons"), A_FLOAT, A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::getNeurons,
+ gensym("getneurons"), A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::setMemory,
+ gensym("memory"), A_FLOAT, A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::getMemory,
+ gensym("getmemory"), A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::setPrecision,
+ gensym("precision"), A_FLOAT, A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::getPrecision,
+ gensym("getprecision"), A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::setTrainOn,
+ gensym("train"), A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::setLearnrate,
+ gensym("learnrate"), A_FLOAT, A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::getLearnrate,
+ gensym("getlearnrate"), A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::saveToFile,
+ gensym("save"), A_SYMBOL, A_NULL);
+ class_addmethod(classPtr, (t_method)pix_recNN::loadFromFile,
+ gensym("load"), A_SYMBOL, A_NULL);
+
+ class_addmethod(classPtr, (t_method)pix_recNN::dspMessCallback,
+ gensym("dsp"), A_NULL);
+ class_addmethod(classPtr, nullfn, gensym("signal"), A_NULL);
+}
+
+//----------------------------------------------------------
+/* DSP callback
+ */
+void pix_recNN::dspMessCallback(void *data, t_signal** sp)
+{
+ GetMyClass(data)->dspMess(data, sp);
+}
+
+//----------------------------------------------------------
+/* sets the precision
+ */
+void pix_recNN::setPrecision(void *data, t_floatarg precision)
+{
+ GetMyClass(data)->precision_ =
+ (precision<1) ? 1 : static_cast<int>(precision);
+}
+void pix_recNN::getPrecision(void *data)
+{
+ post("pix_recNN~: precision: %d",GetMyClass(data)->precision_);
+}
+
+//----------------------------------------------------------
+/* method to train the network
+ */
+void pix_recNN::setTrainOn(void *data)
+{
+ GetMyClass(data)->train_on_ = true;
+}
+
+//----------------------------------------------------------
+/* changes the number of neurons
+ * (which should be the same as the audio buffer)
+ * ATTENTION: a new net will be initialized
+ */
+void pix_recNN::setNeurons(void *data, t_floatarg neurons)
+{
+ GetMyClass(data)->neuron_nr_ =
+ (neurons<1) ? 1 : static_cast<int>(neurons);
+
+ GetMyClass(data)->buildNewNet();
+}
+void pix_recNN::getNeurons(void *data)
+{
+ post("pix_recNN~: nr of neurons: %d (MUST be the same as buffersize!)",
+ GetMyClass(data)->neuron_nr_);
+}
+
+//----------------------------------------------------------
+/* changes the memory size of the net
+ * ATTENTION: a new net will be initialized
+ */
+void pix_recNN::setMemory(void *data, t_floatarg memory)
+{
+ GetMyClass(data)->memory_ =
+ (memory<0) ? 0 : static_cast<int>(memory);
+
+ GetMyClass(data)->buildNewNet();
+}
+void pix_recNN::getMemory(void *data)
+{
+ post("pix_recNN~: memory: %d",
+ GetMyClass(data)->memory_);
+}
+
+//----------------------------------------------------------
+/* sets the learnrate of the net
+ */
+void pix_recNN::setLearnrate(void *data, t_floatarg learn_rate)
+{
+ GetMyClass(data)->net_->setLearningRate(learn_rate);
+}
+void pix_recNN::getLearnrate(void *data)
+{
+ post("pix_recNN~: learning rate: %f",GetMyClass(data)->net_->getLearningRate());
+}
+
+//----------------------------------------------------------
+/* FileIO-stuff
+ */
+void pix_recNN::saveToFile(void *data, t_symbol *filename)
+{
+ GetMyClass(data)->saveNet(filename->s_name);
+}
+void pix_recNN::loadFromFile(void *data, t_symbol *filename)
+{
+ GetMyClass(data)->loadNet(filename->s_name);
+}
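The slice-origin arithmetic in perform() above keeps the grid covering the
whole frame even when the image size is not divisible by the grid size,
because the truncation to int happens only after the multiplication. A
standalone toy (sizes chosen arbitrarily) that reproduces just this math:

    #include <cstdio>
    #include <cmath>

    int main()
    {
      const int blocksize = 2048, xsize = 320, ysize = 240;
      float nr = std::sqrt((float)blocksize);   // ~45.25 slices per axis
      int slice_nr = (int)nr + 1;               // 46x46 grid
      float x_slice = xsize / nr;               // fractional slice sizes
      float y_slice = ysize / nr;
      for(int n=0; n<blocksize; n+=512)         // a few sample indices
      {
        int lu_x = (int)((n % slice_nr) * x_slice);
        int lu_y = (int)((n / slice_nr) * y_slice);
        std::printf("sample %4d -> slice origin (%3d,%3d)\n", n, lu_x, lu_y);
      }
      return 0;
    }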
diff --git a/pix_recNN/pix_recNN.h b/pix_recNN/pix_recNN.h
new file mode 100755
index 0000000..944ebd3
--- /dev/null
+++ b/pix_recNN/pix_recNN.h
@@ -0,0 +1,204 @@
+/////////////////////////////////////////////////////////////////////////////
+//
+// GEM - Graphics Environment for Multimedia
+//
+// pix_recNN~
+// Calculates an audio signal out of a video frame
+// with a recurrent neural network
+//
+// (see NNet.h and RecurrentNeuron.h for more info)
+//
+// header file
+//
+// Copyright (c) 2005 Georg Holzmann <grh@gmx.at>
+// (and of course lots of other developers for PD and GEM)
+//
+// For information on usage and redistribution, and for a DISCLAIMER OF ALL
+// WARRANTIES, see the file, "GEM.LICENSE.TERMS" in this distribution.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef _INCLUDE_PIX_RECNN_H__
+#define _INCLUDE_PIX_RECNN_H__
+
+#include <string>
+#include <sstream>
+#include <fstream>
+#include "Base/GemPixObj.h"
+#include "NNet.h"
+#include "RecurrentNeuron.h"
+
+
+using std::string;
+using std::endl;
+using std::ifstream;
+using std::ofstream;
+using std::istringstream;
+
+using namespace TheBrain;
+
+
+/*-----------------------------------------------------------------
+ * CLASS
+ * pix_recNN~
+ *
+ * calculates an audio signal out of a video frame with
+ * a recurrent neural network
+ *
+ * KEYWORDS
+ * pix audio
+ *
+ * DESCRIPTION
+ * 1 signal-outlet
+ */
+class GEM_EXTERN pix_recNN : public GemPixObj
+{
+ CPPEXTERN_HEADER(pix_recNN, GemPixObj)
+
+ public:
+
+ /* Constructor
+ */
+ pix_recNN(t_floatarg arg0, t_floatarg arg1, t_floatarg arg2);
+
+ protected:
+
+ /* Destructor
+ */
+ virtual ~pix_recNN();
+
+
+ //-----------------------------------
+ /* Image STUFF:
+ */
+
+ /* The pixBlock with the current image
+ * pixBlock m_pixBlock;
+ */
+ unsigned char *m_data_;
+ int m_xsize_;
+ int m_ysize_;
+ int m_csize_;
+ int m_format_;
+
+ /* precision of the image:
+ * 1 means every pixel is taken for the calculation,
+ * 2 every second pixel, 3 every third, ...
+ */
+ int precision_;
+
+  /* temporary buffer for the slice averages
+   * (one RGB triple per neuron)
+   */
+  float **temp_pix_;
+
+  /* the nr of rows temp_pix_ was allocated with
+   * (neuron_nr_ can change before the buffer is rebuilt)
+   */
+  int temp_rows_;
+
+ /* processImage
+ */
+ virtual void processImage(imageStruct &image);
+
+
+ //-----------------------------------
+ /* Neural Network STUFF:
+ */
+
+ /* the neural net
+   * (size: buffersize)
+ */
+ NNet<RecurrentNeuron,RecurrentNeuron> *net_;
+
+  /* training mode on
+   * (will only be on for one audio block)
+ */
+ bool train_on_;
+
+ /* the number of neurons, which should be
+ * THE SAME as the audio buffer size
+ */
+ int neuron_nr_;
+
+  /* memory determines how many results from the past
+   * are used to calculate an output value
+   * (0 means only the result from the current frame,
+   * 2 also from the last frames, etc.)
+ */
+ int memory_;
+
+
+ //-----------------------------------
+ /* Audio STUFF:
+ */
+
+ /* the outlet
+ */
+ t_outlet *out0_;
+
+ /* DSP perform
+ */
+ static t_int* perform(t_int* w);
+
+ /* DSP-Message
+ */
+ virtual void dspMess(void *data, t_signal** sp);
+
+
+ //-----------------------------------
+ /* File IO:
+ */
+
+ /* saves the contents of the current net to file
+ */
+ virtual void saveNet(string filename);
+
+ /* loads the parameters of the net from file
+ */
+ virtual void loadNet(string filename);
+
+ private:
+
+ /* a helper to build a new net
+ */
+ virtual void buildNewNet();
+
+ //-----------------------------------
+ /* static members
+ * (interface to the PD world)
+ */
+
+ /* set/get the precision of the image calculation
+ */
+ static void setPrecision(void *data, t_floatarg precision);
+ static void getPrecision(void *data);
+
+ /* method to train the network
+ */
+ static void setTrainOn(void *data);
+
+ /* changes the number of neurons
+ * (which should be the same as the audio buffer)
+ * ATTENTION: a new net will be initialized
+ */
+ static void setNeurons(void *data, t_floatarg neurons);
+ static void getNeurons(void *data);
+
+  /* changes the memory size of the net
+ * ATTENTION: a new net will be initialized
+ */
+ static void setMemory(void *data, t_floatarg memory);
+ static void getMemory(void *data);
+
+ /* sets the learnrate of the net
+ */
+ static void setLearnrate(void *data, t_floatarg learn_rate);
+ static void getLearnrate(void *data);
+
+ /* DSP callback
+ */
+ static void dspMessCallback(void* data, t_signal** sp);
+
+ /* File IO:
+ */
+ static void saveToFile(void *data, t_symbol *filename);
+ static void loadFromFile(void *data, t_symbol *filename);
+};
+
+#endif // for header file
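The memory_ member documented above is what makes the net "recurrent": each
neuron keeps a short delay line of its own past outputs and feeds them back
into the next result. The actual implementation lives in NNet.h and
RecurrentNeuron.h (not shown in this hunk); the following toy unit, with all
names invented, only sketches the concept:

    #include <cmath>
    #include <vector>

    // toy recurrent unit: the output depends on the current input and
    // on the last `memory` outputs, each with its own feedback weight
    struct ToyRecurrentNeuron
    {
      std::vector<float> fb_w;  // feedback weights, size == memory
      std::vector<float> past;  // delay line of previous outputs
      float in_w;               // input weight

      explicit ToyRecurrentNeuron(int memory)
        : fb_w(memory, 0.1f), past(memory, 0.f), in_w(0.5f) {}

      float tick(float x)
      {
        float sum = in_w * x;
        for(int i=0; i<(int)past.size(); i++)
          sum += fb_w[i] * past[i];
        float y = std::tanh(sum);       // TANH activation, as in the net
        if(!past.empty())               // shift the delay line
        {
          past.insert(past.begin(), y);
          past.pop_back();
        }
        return y;
      }
    };

With memory 0 the unit degenerates to a plain feedforward neuron, which
matches the comment above; larger values make the output depend on more of
the past, which is why the help patch warns about large memory values.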
diff --git a/pix_recNN/readme.txt b/pix_recNN/readme.txt
new file mode 100755
index 0000000..6372504
--- /dev/null
+++ b/pix_recNN/readme.txt
@@ -0,0 +1,27 @@
+pix_recNN - by Georg Holzmann <grh@mur.at>, 2004
+look at http://grh.mur.at/software/thebrain.html
+
+
+--------------------------------license---------------------------------------
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+In the official pix_recNN distribution, the GNU General Public License is
+in the file gpl.txt
+
+
+-------------------------------information-----------------------------------
+
+see the PD help patch \ No newline at end of file