author    Davide Morelli <morellid@users.sourceforge.net>    2005-05-18 19:11:14 +0000
committer Davide Morelli <morellid@users.sourceforge.net>    2005-05-18 19:11:14 +0000
commit    b0bc91a579c22b7dfe934095f28cc04211860f81 (patch)
tree      be56713caa87f8a8f3522edea1c866dca93f7ece /examples/ann_som.pd
parent    2b7a595587419b8fb88b0321856f76d2edb2cf5a (diff)
files moved into the examples/ann_som folder
svn path=/trunk/externals/ann/; revision=3021
Diffstat (limited to 'examples/ann_som.pd')
-rw-r--r--  examples/ann_som.pd | 114
1 file changed, 0 insertions, 114 deletions
diff --git a/examples/ann_som.pd b/examples/ann_som.pd
deleted file mode 100644
index 9f03979..0000000
--- a/examples/ann_som.pd
+++ /dev/null
@@ -1,114 +0,0 @@
-#N canvas 50 -127 640 687 10;
-#X msg 131 495 print;
-#X msg 132 528 new 5 8 8;
-#X msg 127 99 init;
-#X msg 128 274 train;
-#X msg 129 296 test;
-#X msg 128 387 write;
-#X obj 70 559 ann_som 4 9 10;
-#X msg 70 49 1 0 0 1;
-#X msg 70 68 0 1 0 1;
-#X msg 70 87 2 1 0 0;
-#X msg 128 118 init 0.5;
-#X msg 128 138 init 1 0.5 0 0.5;
-#X text 234 101 init all weights with "0";
-#X text 235 120 init all weights with "0.5";
-#X text 235 137 init weights for each sensor;
-#X msg 128 163 learn 1;
-#X msg 128 197 learn 1 0.9 0.1;
-#X text 226 163 set learning rate to 1;
-#X msg 128 180 learn 0.5 0.999;
-#X text 227 179 set learning rate to 0.5 and factor to 0.999;
-#X text 227 197 set learning rate to 1 \, factor to 0.9 and offset to 0.1;
-#X msg 128 214 neighbour 1;
-#X msg 128 231 neighbour 0.5 0.999;
-#X msg 128 248 neighbour 1 0.9 0.1;
-#X text 248 215 set neighbourhood to 1;
-#X text 249 231 set neighbourhood to 0.5 and factor to 0.999;
-#X text 249 249 set neighbourhood to 1 \, factor to 0.9 and offset to 0.1;
-#X text 180 269 set som to "train" mode (learn from sensor-input and output winning neuron);
-#X text 179 291 set som to "test" mode (output winning neuron for sensor-input \, but do not learn!);
-#X msg 129 328 rule INSTAR;
-#X msg 129 345 rule OUTSTAR;
-#X msg 129 362 rule KOHONEN;
-#X text 218 327 learn with IN-STAR rule;
-#X text 219 345 learn with OUT-STAR rule;
-#X text 219 362 learn with KOHONEN rule;
-#X msg 128 405 write mysom.som;
-#X msg 129 429 read;
-#X msg 129 447 read mysom.som;
-#X text 156 68 present various data to the SOM;
-#X text 203 495 for debugging;
-#X text 207 530 create a new SOM with 8x8 neurons \, each having 5 sensors;
-#X text 204 561 create a new SOM with 9x10 neurons \, each having 4 sensors;
-#X floatatom 70 614 4 0 0;
-#X text 113 618 winning neuron;
-#N canvas 13 0 889 630 SOMs 0;
-#X text 76 27 SOM :: Self-Organized Maps;
-#X text 55 53 SOMs are "Artificial Neural Networks" that try to learn something about the data presented to them without a supervisor/teacher.;
-#X text 59 118 in short:;
-#X text 120 119 the neuron whose weight-configuration best matches the presented data is the winner (its number (counting from the lower-left corner) is sent to the output);
-#X text 121 163 to match the data better the next time it is presented \, the weights of the winning neuron are adjusted.;
-#X text 121 188 the weights of the neurons neighbouring the winner are adjusted to match the data too \, but not as strongly as the winner's.;
-#X text 121 276 lr(n+1)=lr(n)*factor;
-#X text 275 277 learning_rate=lr+offset;
-#X text 121 289 nb(n+1)=nb(n)*factor;
-#X text 275 290 neighbourhood=nb+offset;
-#X text 121 230 both neighbourhood and learning-rate (== how much the weights of the winner (and \, proportionally \, the weights of the neighbours) are adjusted) decrease recursively with time.;
-#X text 119 319 thus you will sooner or (most of the time) later get a "brain map" \, where similar inputs activate neurons in specific regions (like there are regions for seeing and regions for hearing in our brains);
-#X text 97 381 there are various rules for re-adjusting the weights of the neurons: in-star \, out-star and kohonen (maybe there are others \, but these are the ones i found in the literature);
-#X obj 607 220 +;
-#X text 640 182 ...;
-#X obj 579 185 * \$1;
-#X obj 607 185 * \$2;
-#X obj 670 185 * \$0;
-#X obj 579 128 unpack 0 0 0 0 0;
-#X text 602 111 n sensors;
-#X text 705 186 weights 1 to n;
-#X obj 579 90 inlet;
-#X obj 607 288 outlet;
-#X text 594 62 a neuron;
-#X text 566 307 the neuron with the highest weighted sum;
-#X text 567 318 matches best and is therefore the winner;
-#X text 53 452 notes:;
-#X text 101 453 each neuron of the SOM has n sensors. you have to present a list of n floats to the SOM to make it work;
-#X text 102 482 you should init the weights for each sensor with the expected mean of the sensor values before you start training to get the best and fastest results;
-#X text 55 87 they were first proposed by the Finnish scientist T. Kohonen in the early 80s.;
-#X text 98 543 if you have no clue what this is all about \, maybe you do not need SOMs (which i doubt) or you should have a look at;
-#X text 118 577 http://www.eas.asu.edu/~eee511;
-#X text 118 591 http://www.cis.hut.fi/projects/ica;
-#X connect 13 0 22 0;
-#X connect 15 0 13 0;
-#X connect 16 0 13 0;
-#X connect 17 0 13 0;
-#X connect 18 0 15 0;
-#X connect 18 1 16 0;
-#X connect 18 4 17 0;
-#X connect 21 0 18 0;
-#X restore 535 44 pd SOMs;
-#X text 81 13 ann_som :: train and test Self-Organized Maps;
-#X obj 73 660 ann_som test.som;
-#X text 211 664 load a SOM-file;
-#X connect 0 0 6 0;
-#X connect 1 0 6 0;
-#X connect 2 0 6 0;
-#X connect 3 0 6 0;
-#X connect 4 0 6 0;
-#X connect 5 0 6 0;
-#X connect 6 0 42 0;
-#X connect 7 0 6 0;
-#X connect 8 0 6 0;
-#X connect 9 0 6 0;
-#X connect 11 0 6 0;
-#X connect 15 0 6 0;
-#X connect 16 0 6 0;
-#X connect 18 0 6 0;
-#X connect 21 0 6 0;
-#X connect 22 0 6 0;
-#X connect 23 0 6 0;
-#X connect 29 0 6 0;
-#X connect 30 0 6 0;
-#X connect 31 0 6 0;
-#X connect 35 0 6 0;
-#X connect 36 0 6 0;
-#X connect 37 0 6 0;
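
The SOMs subcanvas in the deleted patch above explains the whole algorithm in prose: each neuron forms a weighted sum of the n sensor inputs, the neuron with the best match wins, the winner's weights (and, proportionally less, its neighbours') are pulled toward the input, and both learning-rate and neighbourhood decay as lr(n+1)=lr(n)*factor with an added offset. A minimal Python sketch of that behaviour follows, for illustration only: the SomSketch class, the Chebyshev grid distance and the linear neighbour falloff are assumptions, not the actual implementation of the ann_som external.

    import numpy as np

    class SomSketch:
        """Hypothetical stand-in for ann_som: a width x height grid of
        neurons, each holding one weight per sensor."""

        def __init__(self, n_sensors, width, height):
            self.width, self.height = width, height
            # like "init 0.5" in the patch: start every weight at a constant
            self.weights = np.full((width * height, n_sensors), 0.5)
            self.lr, self.lr_factor, self.lr_offset = 1.0, 1.0, 0.0
            self.nb, self.nb_factor, self.nb_offset = 1.0, 1.0, 0.0

        def test(self, x):
            # each neuron outputs the weighted sum of its sensor inputs
            # (the [unpack]/[* $n]/[+] chain drawn in the subcanvas);
            # the highest sum is the winner, numbered from index 0
            return int(np.argmax(self.weights @ np.asarray(x)))

        def train(self, x):
            x = np.asarray(x, dtype=float)
            win = self.test(x)
            wy, wx = divmod(win, self.width)
            lr = self.lr + self.lr_offset     # learning_rate = lr + offset
            nb = self.nb + self.nb_offset     # neighbourhood = nb + offset
            for i in range(len(self.weights)):
                y, xg = divmod(i, self.width)
                d = max(abs(y - wy), abs(xg - wx))  # grid distance to winner
                if d <= nb:
                    # KOHONEN-style pull toward the input; neighbours are
                    # adjusted proportionally less than the winner itself
                    g = lr * (1.0 - d / (nb + 1.0))
                    self.weights[i] += g * (x - self.weights[i])
            # both rates decrease recursively with time:
            # lr(n+1) = lr(n) * factor, nb(n+1) = nb(n) * factor
            self.lr *= self.lr_factor
            self.nb *= self.nb_factor
            return win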
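And a usage sketch mirroring the message flow of the help patch ("new 5 8 8", "learn 0.5 0.999", "neighbour 1 0.9 0.1", then train/test with a list of n floats); the numbers come from the patch, while the mapping of messages onto attributes and methods is assumed:

    som = SomSketch(n_sensors=5, width=8, height=8)       # "new 5 8 8"
    som.lr, som.lr_factor = 0.5, 0.999                    # "learn 0.5 0.999"
    som.nb, som.nb_factor, som.nb_offset = 1.0, 0.9, 0.1  # "neighbour 1 0.9 0.1"
    for _ in range(100):
        som.train([1, 0, 0, 1, 0])    # "train": learn and output winner
    print(som.test([1, 0, 0, 1, 0]))  # "test": output winner, no learning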