%% Parameters and input patterns for the competitive-learning simulation.
% Semicolons added throughout: the original left every assignment
% unterminated, echoing all values and array rows to the console.
gw = 0.1;        % Learning rate for the winning unit (large)
gl = 0.01;       % Learning rate for non-winning units (small)
Inputtotal = 4;  % Number of different input patterns
Htotal = 2;      % Number of hidden (non-input) units
Itotal = 98;     % Dimensionality of input patterns (7x14 grid = 98)
Inum = 0;        % NOTE(review): never read in this file -- possibly vestigial
Wnum = 0;        % NOTE(review): never read in this file -- possibly vestigial

% The input patterns are defined below. They consist of the four patterns
% AA, AB, BA, and BB drawn on a seven-by-fourteen grid, stored one row
% (98 pixels, row-major over the grid) per pattern.
I = zeros(Inputtotal, Itotal);  % preallocate instead of growing by assignment

I(1, 1:14) = [0 0 0 1 0 0 0 0 0 0 1 0 0 0];
I(1,15:28) = [0 0 1 0 1 0 0 0 0 1 0 1 0 0];
I(1,29:42) = [0 1 0 0 0 1 0 0 1 0 0 0 1 0];
I(1,43:56) = [0 1 1 1 1 1 0 0 1 1 1 1 1 0];
I(1,57:70) = [0 1 0 0 0 1 0 0 1 0 0 0 1 0];
I(1,71:84) = [0 1 0 0 0 1 0 0 1 0 0 0 1 0];
I(1,85:98) = [0 1 0 0 0 1 0 0 1 0 0 0 1 0];

I(2, 1:14) = [0 0 0 1 0 0 0 0 1 1 1 1 0 0];
I(2,15:28) = [0 0 1 0 1 0 0 0 0 1 0 0 1 0];
I(2,29:42) = [0 1 0 0 0 1 0 0 0 1 0 0 1 0];
I(2,43:56) = [0 1 1 1 1 1 0 0 0 1 1 1 0 0];
I(2,57:70) = [0 1 0 0 0 1 0 0 0 1 0 0 1 0];
I(2,71:84) = [0 1 0 0 0 1 0 0 0 1 0 0 1 0];
I(2,85:98) = [0 1 0 0 0 1 0 0 1 1 1 1 0 0];

I(3, 1:14) = [0 1 1 1 1 0 0 0 0 0 1 0 0 0];
I(3,15:28) = [0 0 1 0 0 1 0 0 0 1 0 1 0 0];
I(3,29:42) = [0 0 1 0 0 1 0 0 1 0 0 0 1 0];
I(3,43:56) = [0 0 1 1 1 0 0 0 1 1 1 1 1 0];
I(3,57:70) = [0 0 1 0 0 1 0 0 1 0 0 0 1 0];
I(3,71:84) = [0 0 1 0 0 1 0 0 1 0 0 0 1 0];
I(3,85:98) = [0 1 1 1 1 0 0 0 1 0 0 0 1 0];

I(4, 1:14) = [0 1 1 1 1 0 0 0 1 1 1 1 0 0];
I(4,15:28) = [0 0 1 0 0 1 0 0 0 1 0 0 1 0];
I(4,29:42) = [0 0 1 0 0 1 0 0 0 1 0 0 1 0];
I(4,43:56) = [0 0 1 1 1 0 0 0 0 1 1 1 0 0];
I(4,57:70) = [0 0 1 0 0 1 0 0 0 1 0 0 1 0];
I(4,71:84) = [0 0 1 0 0 1 0 0 0 1 0 0 1 0];
I(4,85:98) = [0 1 1 1 1 0 0 0 1 1 1 1 0 0];

% Randomize the weights to start with. Then, since they must sum to 1,
% normalize them by dividing each individual weight by the sum of all the
% weights (done below).
% Randomly initialize the weight matrix, then normalize each hidden unit's
% weight vector so its elements sum to 1.
W = zeros(Htotal, Itotal);  % preallocate instead of growing by assignment
for Hcount = 1:Htotal
    W(Hcount, :) = rand(1, Itotal);
    W(Hcount, :) = W(Hcount, :) / sum(W(Hcount, :));  % normalize to unit sum
end %Hcount

% Now start the simulation itself. For each of 100 training epochs, cycle
% through each of the training inputs in order.
O = zeros(1, Htotal);  % hidden-unit activations, preallocated
for epochs = 1:100
    for Inputcount = 1:Inputtotal
        % Feed-forward pass: each hidden unit's activation is the dot
        % product of its weight vector with the current input pattern.
        for j = 1:Htotal
            O(j) = dot(I(Inputcount, 1:Itotal), W(j, 1:Itotal));
        end %j

        % Winner-take-all: the unit with the largest activation wins.
        % max() breaks ties by the first index, matching the original
        % strict-greater-than scan, and is always defined (the original
        % left `winner` unset if no activation exceeded 0).
        [~, winner] = max(O);
        O(:) = 0;
        O(winner) = 1;  % semicolon added: original echoed this every iteration

        % Nj = number of active input lines for this pattern. It does not
        % depend on j, so it is computed once per pattern (the original
        % recomputed it inside the hidden-unit loop).
        active = (I(Inputcount, 1:Itotal) == 1);
        Nj = sum(active);

        % Learning rule: each weight decays by a factor g, and the freed
        % weight mass g is redistributed equally over the active lines.
        % The winner learns quickly (gw); all other units learn slowly (gl).
        for j = 1:Htotal
            if j == winner
                g = gw;  % fast learning for the winner
            else
                g = gl;  % slow learning for non-winners
            end
            W(j, :) = W(j, :) - g * W(j, :);        % decay all lines
            W(j, active) = W(j, active) + g / Nj;   % boost active lines
        end %j
    end %Inputcount
end %epochs