
Upload files to 'Agnotron-model'

Michael Liu Happ 1 year ago
parent
commit
d0e10ed722
33 changed files with 1095 additions and 0 deletions
  1. BIN
      Agnotron-model/Clustering Network/README.rtf
  2. 1 0
      Agnotron-model/Clustering Network/agnoPlotting_100D.m
  3. 1 0
      Agnotron-model/Clustering Network/agnoPlotting_2D.m
  4. 1 0
      Agnotron-model/Clustering Network/agnoPlotting_2D_full.m
  5. BIN
      Agnotron-model/Clustering Network/agnoPlotting_clusteringAndWeights.m
  6. 1 0
      Agnotron-model/Clustering Network/dataGen.m
  7. BIN
      Agnotron-model/Clustering Network/genData100D_10c_250ppc.mat
  8. BIN
      Agnotron-model/Clustering Network/genData100D_10c_50ppc.mat
  9. BIN
      Agnotron-model/Clustering Network/genData2D_3c_100ppc.mat
  10. BIN
      Agnotron-model/Clustering Network/genData2D_3c_500ppc.mat
  11. 1 0
      Agnotron-model/Clustering Network/mismatchIter_v2.m
  12. 1 0
      Agnotron-model/Clustering Network/pcaIter_v6.m
  13. 265 0
      Agnotron-model/Clustering Network/pcaNetworkLoops.m
  14. 289 0
      Agnotron-model/Clustering Network/pcaPlusMN_2D_v4.m
  15. 309 0
      Agnotron-model/Clustering Network/pcaPlusMN_HD_v4.m
  16. 1 0
      Agnotron-model/Clustering Network/updateC_v5.m
  17. 1 0
      Agnotron-model/Clustering Network/updateD_v2.m
  18. 1 0
      Agnotron-model/Clustering Network/updateM_v4.m
  19. 1 0
      Agnotron-model/Clustering Network/updateW_v5.m
  20. 36 0
      Agnotron-model/Sequence Learning Network/EXTRA_plot_total_mismatch.m
  21. 1 0
      Agnotron-model/Sequence Learning Network/cdIter_v3.m
  22. 1 0
      Agnotron-model/Sequence Learning Network/cdWeight_plotter_v2.m
  23. 1 0
      Agnotron-model/Sequence Learning Network/mmIter_v1.m
  24. BIN
      Agnotron-model/Sequence Learning Network/old/cdIter_v2.m
  25. BIN
      Agnotron-model/Sequence Learning Network/old/sequenceCG_v19_wd2Chains.m
  26. BIN
      Agnotron-model/Sequence Learning Network/seqCD_plotter_forShow.m
  27. 1 0
      Agnotron-model/Sequence Learning Network/seqCD_plotter_v6.m
  28. BIN
      Agnotron-model/Sequence Learning Network/sequenceCG_v20.m
  29. 181 0
      Agnotron-model/Sequence Learning Network/sequenceCG_v20_AA_works.m
  30. BIN
      Agnotron-model/Sequence Learning Network/sequenceCG_v20_inputPlay.m
  31. BIN
      Agnotron-model/Sequence Learning Network/sortByAct.m
  32. BIN
      Agnotron-model/Sequence Learning Network/sortbyCorr.m
  33. 1 0
      Agnotron-model/Sequence Learning Network/updateWs_v2.m

BIN
Agnotron-model/Clustering Network/README.rtf


+ 1 - 0
Agnotron-model/Clustering Network/agnoPlotting_100D.m

@@ -0,0 +1 @@
+function agnoPlotting_100D(bigY, mmNet, pcaNet, ts_idx, iterations)

yT = bigY(:, ts_idx-1);
cT = pcaNet.bigC(:, ts_idx-1);

figure(1)
if pcaNet.learning == 1
    plot(mmNet.allErrors(1:ts_idx), 'r-');
else
    plot(mmNet.allErrors(1:ts_idx), 'k-');
end
% hold on;
% refline(0, pcaNet.sigmaThresh);
% hold off;
title(['Total Error over Time - ts: ' num2str(ts_idx)])
xlabel('Timestep')
ylabel('Error')
xlim([0 iterations])
ylim([0 500])
pause(1e-5)

+ 1 - 0
Agnotron-model/Clustering Network/agnoPlotting_2D.m

@@ -0,0 +1 @@
+function agnoPlotting_2D(bigY, mmNet, pcaNet, ts_idx, iterations)

yT = bigY(:, ts_idx-1);
cT = pcaNet.bigC(:, ts_idx-1);

figure(1)
if pcaNet.learning == 1
    plot(mmNet.allErrors(1:ts_idx), 'r-');
else
    plot(mmNet.allErrors(1:ts_idx), 'k-');
end
hold on;
%refline(0, pcaNet.sigmaThresh);
plot([500.5 500.5], [0 25], 'k--')
plot([1000.5 1000.5], [0 25], 'k--')
plot(1001, mmNet.allErrors(1001), 'b.', 'MarkerSize', 12)
hold off;
title(['Total Error over Time - ts: ' num2str(ts_idx)])
xlabel('Timestep')
ylabel('Error')
xlim([995 1095])
ylim([0 10])
% pause(1e-5)
makepretty;



% activityPlot = zeros(7,2);
% threshPts = zeros(7,2);
% for cIdx = 1:7
%     activityPlot(cIdx,:) = cT(cIdx) .* (pcaNet.W(cIdx,:) ./ norm(pcaNet.W(cIdx,:)));
%     
%     effectiveExcite = 1 - exp((-pcaNet.excitability(cIdx)/5).^3);
%     actThresh = 1 - effectiveExcite; % this is now a vector; great
%     threshPts(cIdx,:) = actThresh .* (pcaNet.W(cIdx,:) ./ norm(pcaNet.W(cIdx,:)));
% end

colors = {'r.', 'y.', 'c.', 'g.', 'm.', 'k.', 'b.'};
for cIdx = 1:7
    thisClust = find(pcaNet.clusters == cIdx);
    figure(5);
    plot(bigY(1, thisClust), bigY(2, thisClust), colors{cIdx})
    %         if ts_idx == 2
    hold on;
    %         end
end
%     title('Clustered Data')

% figure(5)
% plotv(pcaNet.W(1,:)', 'r')
% %     plotv(activityPlot(1,:)', 'r*')
% %     plotv(threshPts(1,:)', 'kx')
% plotv(pcaNet.W(2,:)', 'b')
% %     plotv(activityPlot(2,:)', 'b*')
% %     plotv(threshPts(2,:)', 'kx')
% plotv(pcaNet.W(3,:)', 'g')
% %     plotv(activityPlot(3,:)', 'g*')
% %     plotv(threshPts(3,:)', 'kx')
% plotv(pcaNet.W(4,:)', 'c')
% %     plotv(activityPlot(4,:)', 'c*')
% %     plotv(threshPts(4,:)', 'kx')
% plotv(pcaNet.W(5,:)', 'm')
%     plotv(activityPlot(5,:)', 'm*')
%     plotv(threshPts(5,:)', 'kx')
%plotv(pcaNet.W(6,:)', 'k')
%     plotv(activityPlot(6,:)', 'k*')
%     plotv(threshPts(6,:)', 'kx')
%plotv(pcaNet.W(7,:)', 'y')
%     plotv(activityPlot(7,:)', 'y*')
%     plotv(threshPts(7,:)', 'kx')
hold off;
title(['Clusters and Weights, iteration: ' num2str(ts_idx)])
makepretty;



% figure(1212)
% silhouette(bigY',pcaNet.clusters)

% 
% if ts_idx == 2
%     figure(10)
%     plot(bigY(1,:), bigY(2,:), 'k.')
%     hold on;
%     plotv(pcaNet.W(1,:)', 'r')
%     plotv(pcaNet.W(2,:)', 'b')
%     plotv(pcaNet.W(3,:)', 'g')
%     plotv(pcaNet.W(4,:)', 'c')
%     plotv(pcaNet.W(5,:)', 'm')
%     plotv(pcaNet.W(6,:)', 'k')
%     plotv(pcaNet.W(7,:)', 'y')
% end

% if ts_idx == iterations
%     figure(20)
%     plot(1:100, mmNet.allErrors(1:100), 'r-');
%     hold on;
%     plot(101:200, mmNet.allErrors(101:200), 'b-');
%     plot(201:300, mmNet.allErrors(201:300), 'k-');
% end

% figure(25)
% plot(ts_idx, norm(pcaNet.W(1,:)), 'r.')
% if ts_idx == 2
%     hold on;
% end
% plot(ts_idx, norm(pcaNet.W(2,:)), 'b.')
% plot(ts_idx, norm(pcaNet.W(3,:)), 'g.')
% plot(ts_idx, norm(pcaNet.W(4,:)), 'c.')
% plot(ts_idx, norm(pcaNet.W(5,:)), 'm.')
% plot(ts_idx, norm(pcaNet.W(6,:)), 'k.')
% plot(ts_idx, norm(pcaNet.W(7,:)), 'y.')
% xlim([0 iterations])
% title('Weight Magnitudes')
% ylabel('Norm of Weight')
% xlabel('Timestep')

+ 1 - 0
Agnotron-model/Clustering Network/agnoPlotting_2D_full.m

@@ -0,0 +1 @@
+function agnoPlotting_2D_full(bigY, mmNet, pcaNet, ts_idx, iterations)

yT = bigY(:, ts_idx-1);
cT = pcaNet.bigC(:, ts_idx-1);

figure(1)
if pcaNet.learning == 1
    plot(mmNet.allErrors(1:ts_idx), 'r-');
else
    plot(mmNet.allErrors(1:ts_idx), 'k-');
end
hold on;
refline(0, pcaNet.sigmaThresh);
hold off;
title(['Total Error over Time - ts: ' num2str(ts_idx)])
xlabel('Timestep')
ylabel('Error')
xlim([0 iterations])
ylim([0 5])
pause(1e-5)



% activityPlot = zeros(7,2);
% threshPts = zeros(7,2);
% for cIdx = 1:7
%     activityPlot(cIdx,:) = cT(cIdx) .* (pcaNet.W(cIdx,:) ./ norm(pcaNet.W(cIdx,:)));
%     
%     effectiveExcite = 1 - exp((-pcaNet.excitability(cIdx)/5).^3);
%     actThresh = 1 - effectiveExcite; % this is now a vector; great
%     threshPts(cIdx,:) = actThresh .* (pcaNet.W(cIdx,:) ./ norm(pcaNet.W(cIdx,:)));
% end

colors = {'r.', 'b.', 'g.', 'c.', 'm.', 'k.', 'y.'};
for cIdx = 1:7
    thisClust = find(pcaNet.clusters == cIdx);
    figure(5);
    plot(bigY(1, thisClust), bigY(2, thisClust), colors{cIdx})
    %         if ts_idx == 2
    hold on;
    %         end
end
%     title('Clustered Data')

figure(5)
plotv(pcaNet.W(1,:)', 'r')
%     plotv(activityPlot(1,:)', 'r*')
%     plotv(threshPts(1,:)', 'kx')
plotv(pcaNet.W(2,:)', 'b')
%     plotv(activityPlot(2,:)', 'b*')
%     plotv(threshPts(2,:)', 'kx')
plotv(pcaNet.W(3,:)', 'g')
%     plotv(activityPlot(3,:)', 'g*')
%     plotv(threshPts(3,:)', 'kx')
plotv(pcaNet.W(4,:)', 'c')
%     plotv(activityPlot(4,:)', 'c*')
%     plotv(threshPts(4,:)', 'kx')
plotv(pcaNet.W(5,:)', 'm')
%     plotv(activityPlot(5,:)', 'm*')
%     plotv(threshPts(5,:)', 'kx')
plotv(pcaNet.W(6,:)', 'k')
%     plotv(activityPlot(6,:)', 'k*')
%     plotv(threshPts(6,:)', 'kx')
plotv(pcaNet.W(7,:)', 'y')
%     plotv(activityPlot(7,:)', 'y*')
%     plotv(threshPts(7,:)', 'kx')
hold off;
title(['Clusters and Weights, iteration: ' num2str(ts_idx)])


if ts_idx == 2
    figure(10)
    plot(bigY(1,:), bigY(2,:), 'k.')
    hold on;
    plotv(pcaNet.W(1,:)', 'r')
    plotv(pcaNet.W(2,:)', 'b')
    plotv(pcaNet.W(3,:)', 'g')
    plotv(pcaNet.W(4,:)', 'c')
    plotv(pcaNet.W(5,:)', 'm')
    plotv(pcaNet.W(6,:)', 'k')
    plotv(pcaNet.W(7,:)', 'y')
end

if ts_idx == iterations
    figure(20)
    plot(1:100, mmNet.allErrors(1:100), 'r-');
    hold on;
    plot(101:200, mmNet.allErrors(101:200), 'b-');
    plot(201:300, mmNet.allErrors(201:300), 'k-');
end

figure(25)
plot(ts_idx, norm(pcaNet.W(1,:)), 'r.')
if ts_idx == 2
    hold on;
end
plot(ts_idx, norm(pcaNet.W(2,:)), 'b.')
plot(ts_idx, norm(pcaNet.W(3,:)), 'g.')
plot(ts_idx, norm(pcaNet.W(4,:)), 'c.')
plot(ts_idx, norm(pcaNet.W(5,:)), 'm.')
plot(ts_idx, norm(pcaNet.W(6,:)), 'k.')
plot(ts_idx, norm(pcaNet.W(7,:)), 'y.')
xlim([0 iterations])
title('Weight Magnitudes')
ylabel('Norm of Weight')
xlabel('Timestep')

BIN
Agnotron-model/Clustering Network/agnoPlotting_clusteringAndWeights.m


+ 1 - 0
Agnotron-model/Clustering Network/dataGen.m

@@ -0,0 +1 @@
+%%% data generator
clear 
close all

%% Configs

numDims = 100;
numClusters = 10;
stdClust = 0.5; %std within cluster - not so bad at 1
clusterSep = 25; % std of distribution from which clusters are drawn
ptsPerCluster = 250;

%% 

stds = stdClust .* ones(numClusters, 1);
clustMeans = clusterSep*rand(numClusters, numDims); % original data is strictly pos.

allPts_unshuff = [];
clustIDs_unshuff = [];
for clustIdx = 1:numClusters
    newBlob = [];
    for dim = 1:numDims
        newBlobDim = clustMeans(clustIdx,dim)+stds(clustIdx).*randn(ptsPerCluster, 1);
        newBlob = [newBlob newBlobDim]; % ptsPerCluster pts per dimension, numDims dims total
    end
    
    clustIDs_unshuff(end+1:end+ptsPerCluster) = clustIdx;
    allPts_unshuff = [allPts_unshuff; newBlob]; 
end

allPts_dm_unshuff = allPts_unshuff;

for dim = 1:numDims
    allPts_dm_unshuff(:,dim) = allPts_dm_unshuff(:,dim) - mean(allPts_dm_unshuff(:,dim));
end


% calc clust means for use in dot product calculation
clustMeans = zeros(size(clustMeans));
for clustIdx = 1:numClusters
    startPt = ((clustIdx-1)*ptsPerCluster) + 1; %first pt in cluster
    endPt = startPt + ptsPerCluster - 1; %last pt in cluster
    thisBlob = allPts_dm_unshuff(startPt:endPt, :);
    thisMean = mean(thisBlob, 1); 
    clustMeans(clustIdx, :) = thisMean ./ norm(thisMean); %normalized
end

shuffIdx = randperm(ptsPerCluster*numClusters);

allPts = allPts_unshuff(shuffIdx, :);
allPts_dm = allPts_dm_unshuff(shuffIdx, :);
clustIDs = clustIDs_unshuff(shuffIdx);



ccDot = zeros(numClusters);
ccDist = zeros(numClusters);
for clust_i = 1:numClusters
    for clust_j = 1:numClusters
        ccDot(clust_i, clust_j) = abs(clustMeans(clust_i,:) * clustMeans(clust_j,:)');
        ccDist(clust_i, clust_j) = norm(clustMeans(clust_i,:) - clustMeans(clust_j,:));
    end
end

ptDist = zeros(numClusters*ptsPerCluster);
for ptIdx = 1:size(allPts, 1)
    for ptJdx = 1:size(allPts, 1)
        ptDist(ptIdx, ptJdx) =...
            norm(allPts_unshuff(ptIdx,:) - allPts_unshuff(ptJdx,:));
    end
end


if numDims == 2
    figure(10);
    plot(allPts_dm(:,1)', allPts_dm(:,2)', '.')
    grid on;
    hold on;
    plot(0, 0, 'r*')
    hold off;
end


figure(1); % useful because weight vectors should be as separate as possible
imagesc(ccDot); colorbar
title('Dot Products of Cluster Means')
xlabel('Cluster')
ylabel('Cluster')

figure(2)
imagesc(ccDist); colorbar
title('Average Distance between Clusters')
xlabel('Cluster')
ylabel('Cluster')

figure(3)
imagesc(ptDist); colorbar
title('Pointwise Distances')



savename = ['genData' num2str(numDims) 'D_' num2str(numClusters)...
    'c_' num2str(ptsPerCluster) 'ppc.mat'];

clearvars -except allPts allPts_dm allPts_unshuff...
    allPts_dm_unshuff clustIDs clustIDs_unshuff savename

save(savename)
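
A minimal usage sketch (illustrative, not part of the committed file): load one of the generated datasets above and sanity-check its contents, assuming the 2-D, 3-cluster, 500-points-per-cluster settings.

% hypothetical sanity check for a dataGen.m output
load('genData2D_3c_500ppc.mat')              % allPts, allPts_dm, clustIDs, ...
assert(isequal(size(allPts), [1500 2]))      % (numClusters*ptsPerCluster) x numDims
assert(isequal(unique(clustIDs), 1:3))       % shuffled labels cover all clusters
disp(max(abs(mean(allPts_dm, 1))))           % de-meaned columns: ~0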

BIN
Agnotron-model/Clustering Network/genData100D_10c_250ppc.mat


BIN
Agnotron-model/Clustering Network/genData100D_10c_50ppc.mat


BIN
Agnotron-model/Clustering Network/genData2D_3c_100ppc.mat


BIN
Agnotron-model/Clustering Network/genData2D_3c_500ppc.mat


+ 1 - 0
Agnotron-model/Clustering Network/mismatchIter_v2.m

@@ -0,0 +1 @@
+function mmNet = mismatchIter_v2(cIn, bigY, ts_idx, mmNet, trackVars)
y_t = bigY(:, ts_idx);
c_t = cIn;

% Ny cells
v_y = (mmNet.we_yn * y_t) - (mmNet.wi_cn * mmNet.r_cn * c_t);
nActiv_y = v_y;
nActiv_y(nActiv_y<mmNet.thresh) = 0;

% Nc cells
v_c = (mmNet.we_cn * c_t) - (mmNet.wi_yn * mmNet.r_yn * y_t);
nActiv_c = v_c;
nActiv_c(nActiv_c<mmNet.thresh) = 0;

addpart = (mmNet.we_cn * c_t);
% disp(mean(addpart(:)))

% update weights
if mmNet.y_plastic == 1
    deltaWYe = -1*mmNet.eta.*v_y*y_t';
    deltaWYi = mmNet.eta.*v_c*y_t'*mmNet.r_yn;
else
    deltaWYe = zeros(size(mmNet.we_yn));
    deltaWYi = zeros(size(mmNet.wi_yn));
end

if mmNet.c_plastic == 1
    deltaWCe = -1*mmNet.eta.*v_c*c_t';
    deltaWCi = mmNet.eta.*v_y*c_t'*mmNet.r_cn;
else
    deltaWCe = zeros(size(mmNet.we_cn));
    deltaWCi = zeros(size(mmNet.wi_cn));
end

mmNet.we_yn = mmNet.we_yn + deltaWYe;
mmNet.wi_yn = mmNet.wi_yn + deltaWYi;
mmNet.we_cn = mmNet.we_cn + deltaWCe;
mmNet.wi_cn = mmNet.wi_cn + deltaWCi;

if mmNet.signed_synapses == 1 %forbid negative w if using ss
    mmNet.we_yn(mmNet.we_yn<0) = eps;
    mmNet.wi_yn(mmNet.wi_yn<0) = eps;
    mmNet.we_cn(mmNet.we_cn<0) = eps;
    mmNet.wi_cn(mmNet.wi_cn<0) = eps;
end

% update the lists tracking variables -- THIS IS VERY SLOW! Skip when speed matters
if trackVars == 1
    mmNet.Vs_y(:,ts_idx) = v_y;
    mmNet.Vs_c(:,ts_idx) = v_c;
    mmNet.Fs_y(:,ts_idx) = nActiv_y;
    mmNet.Fs_c(:,ts_idx) = nActiv_c;


    mmNet.yWs_e(:,:,ts_idx+1) = mmNet.we_yn;
    mmNet.cWs_e(:,:,ts_idx+1) = mmNet.we_cn;
    mmNet.yWs_i(:,:,ts_idx+1) = mmNet.wi_yn;
    mmNet.cWs_i(:,:,ts_idx+1) = mmNet.wi_cn;

    mmNet.wyChanges_e(:,:,ts_idx) = deltaWYe;
    mmNet.wcChanges_e(:,:,ts_idx) = deltaWCe;
    mmNet.wyChanges_i(:,:,ts_idx) = deltaWYi;
    mmNet.wcChanges_i(:,:,ts_idx) = deltaWCi;
end

mmNet.errors_y(ts_idx) = sum(nActiv_y);
mmNet.errors_c(ts_idx) = sum(nActiv_c);
mmNet.allErrors(ts_idx) = mmNet.errors_y(ts_idx) + mmNet.errors_c(ts_idx);
mmNet.yIn = bigY;
mmNet.cIn = cIn;

% if mod(ts_idx, 100) == 0
%     yAct = reshape(nActiv_y, [20, 10]);
%     cAct = reshape(nActiv_c, [20, 10]);
%     maxmax = max(max(yAct(:)), max(cAct(:)));
%     
%     ySum = sum(nActiv_y);
%     cSum = sum(nActiv_c);
%     
%     
%     figure(3)
%     subplot(121)
%     imagesc(yAct)
%     caxis([0 maxmax])
%     colorbar;
%     title(['Y Network Activity - ' num2str(ts_idx) ' - Tot: ' num2str(ySum)])
% 
%     subplot(122)
%     imagesc(cAct)
%     caxis([0 maxmax])
%     colorbar;
%     title(['C Network Activity - Tot: ' num2str(cSum)])
% end

end
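
A toy single-step call (a sketch with made-up sizes and values, not from the commit) showing the fields mismatchIter_v2 expects:

% toy mismatch step: 2 inputs, 2 context units, 3 Ny/Nc cells
rng(1)
nY = 2; nC = 2; nN = 3; T = 5;
mm.we_yn = rand(nN, nY)/nY;   mm.wi_cn = rand(nN, nC);
mm.we_cn = rand(nN, nC);      mm.wi_yn = rand(nN, nY)/nY;
mm.r_yn = eye(nY);            mm.r_cn = eye(nC);
mm.thresh = 0.05;  mm.eta = 0.025;
mm.y_plastic = 0;  mm.c_plastic = 1;  mm.signed_synapses = 1;
mm.errors_y = zeros(T,1); mm.errors_c = zeros(T,1); mm.allErrors = zeros(T,1);
bigY = rand(nY, T);  cIn = [1; 0];           % one active context unit
mm = mismatchIter_v2(cIn, bigY, 1, mm, 0);   % trackVars = 0 skips the slow logging
disp(mm.allErrors(1))                        % total rectified mismatch at t = 1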

+ 1 - 0
Agnotron-model/Clustering Network/pcaIter_v6.m

@@ -0,0 +1 @@
+function pcaNet = pcaIter_v6(bigY, ts_idx, pcaNet)

% calc features for T+1 based on output at T
yT = bigY(:, ts_idx-1);
 
cT = pcaNet.bigC(:, ts_idx-1);
% cT = cT - eps; % IDK

if pcaNet.learning == 1
    pcaNet.D = updateD_v2(pcaNet.D, cT);

    pcaNet.W = updateW_v5(pcaNet.W, cT, yT, pcaNet.D, pcaNet.etaW, pcaNet.capW, pcaNet.maxW); 

    pcaNet.M = updateM_v4(pcaNet.M, cT, pcaNet.D, pcaNet.inhibCap, pcaNet.etaM, pcaNet.maxM);
end


% update outputs for T+1
y = bigY(:,ts_idx);

c = updateC_v5(...
    pcaNet.W, pcaNet.M, y, pcaNet.changeThresh);

[maxx, thisCluster] = max(c);

pcaNet.bigC(:, ts_idx) = 0;

if maxx > 0
    pcaNet.clusters = [pcaNet.clusters; thisCluster];
    pcaNet.bigC(thisCluster, ts_idx) = c(thisCluster);
else
    pcaNet.clusters = [pcaNet.clusters; 0];
end



end
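
An illustrative aside on the winner-take-all readout above: only the maximal entry of c survives into bigC, and only if it is positive.

c = [0.1; 0.7; 0.3];
[maxx, k] = max(c);
col = zeros(size(c));
if maxx > 0, col(k) = c(k); end
disp(col')    % -> 0  0.7  0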

+ 265 - 0
Agnotron-model/Clustering Network/pcaNetworkLoops.m

@@ -0,0 +1,265 @@
+close all;
+clear all;
+
+avg_taus = [];
+taus_3 = [];
+mm_etas = [0:0.0125:2];
+
+%% one-time setup
+seed = 12345; %10 was working for many of these demos %56; %23, 10 for 100D 10 clusters
+    % seed choice affects initial weight direction, which affects peaks of
+    % error plots
+    
+trackVars = 1; %turning on slows things WAY DOWN
+
+
+% Select Dataset
+%load('genData1.mat')
+% load('genData2D_4c_200ppc.mat')
+load('genData2D_3c_500ppc.mat')
+% load('genData100D_10.mat')
+% load('genData_realSpec_try2.mat')
+% load('genData_realSpec_5c.mat')
+
+
+% Define how data will be presented
+twoClust_dm_unshuff = allPts_dm_unshuff(1:100, :);
+twoClust_unshuff = allPts_unshuff(1:100, :);
+
+shuffIdx = 1:100;%randperm(500);
+
+part1_dm = twoClust_dm_unshuff(shuffIdx,:);
+part1 = twoClust_unshuff(shuffIdx, :);
+
+% bigY is input fed into clustering network - note that it's de-meaned
+bigY = [allPts_dm_unshuff']; %[part1_dm' allPts_dm'];%_unshuff'];% allPts_dm' allPts_dm'];%(1:500,:)'];% allPts_dm'];
+
+% mmY is input fed into mismatch network - just a non-de-meaned version of
+% bigY
+mmY = [allPts_unshuff']; %[part1' allPts'];%_unshuff'];% allPts' allPts'];%(1:500,:)'];%  allPts'];
+
+% PCA CONFIGS
+pcaNet.changeThresh = 1e-4; % for output convergence
+initWeightMag = 0.1; %0.8 for some demos % should be 0.1
+pcaNet.capW = 10;%500; %max weight to each output cell -- CHANGING THIS DIDN'T MAKE A DIFFERENCE
+pcaNet.inhibCap = 10; %NEED 10 FOR SYNTHETIC, 25 FOR REAL... -- CHANGING THIS DOES MATTER
+
+pcaNet.etaW = 0.05; %originally each was 0.5 -- MAKING THIS TOO HIGH DOESN'T MATTER MUCH
+pcaNet.etaM = 0.05; % originally 0.5 -- CHANGING THIS DOESN'T MATTER MUCH
+pcaNet.maxM = 1; 
+pcaNet.maxW = 1;
+
+% MISMATCH CONFIGS
+% (mmNet.eta is swept in the loop below; changing it drastically affects the shape of the error plots)
+mmNet.thresh = 0.05; % CHANGING THIS AFFECTS SHAPE OF ERROR PLOTS
+
+% MISMATCH STRUCTURE CONFIGS
+mmNet.signed_synapses = 1; %force positive weight coeffs 
+mmNet.c_plastic = 1; % 
+mmNet.y_plastic = 0;
+
+% MISMATCH ARCHITECTURE CONFIGS
+yE_type = 'rand'; %'rand' or 'randnorm'
+cE_type = 'rand';  
+yI_type = 'rand';
+cI_type = 'rand';
+yR_type = 'direct'; %'rand' or 'direct' 
+cR_type = 'direct'; %'rand' or 'direct'
+
+% MISMATCH NETWORK CONFIGS
+nCells_y = size(mmY,1); %one input per input dim
+nCells_c = 10; % (500 works on synthetic) 10 clusters in 100D case (nb 100d is a lot)
+nCells_Ny = 5; %not super sure how high or low this needs to be
+nCells_Nc = 5; %but 100 and 100 should be enough to give us convex cone
+iterations = size(bigY,2);
+
+
+% Total Configs -- this doesn't matter, we're always learning
+% (learningThresh = 0)
+sigmaThresh = 2.5; %1 works for seed 10 %2.5;%2.5;%%0.25;%25; %1 worked well 
+deltaLearn = 0.2; %(0.1) % no buffer -> incorrect clustering (not obvious why needed for shuffled data)
+learningThresh = 0;%1; %make it deltaLearn to basically eliminate deltaLearn %0.5; %always learning -> incorrect clustering
+pcaNet.sigmaThresh = sigmaThresh;
+
+
+pcaNet.learning = 1; %this is set to 0 if mismatch is low, 1 if high
+learningSig = 1;
+rng(seed) 
+
+%% looping sections
+for mIdx = 1:length(mm_etas)
+    mmNet.eta = mm_etas(mIdx);
+    disp(['loop ' num2str(mIdx) ' of ' num2str(length(mm_etas))])
+    
+    
+    W_init = initWeightMag*randn(nCells_c, nCells_y);
+    M_init = 0*initWeightMag*rand(nCells_c); %initially 0
+    
+    for idx = 1:nCells_c
+        M_init(idx, idx) = 0; %nrns don't drive selves
+    end
+    
+    D_init = zeros(nCells_c,1);
+    pcaNet.bigC = zeros(nCells_c, iterations);
+    
+    pcaNet.W = W_init;
+    pcaNet.M = M_init;
+    pcaNet.D = D_init;
+    
+    c = updateC_v5(...
+        pcaNet.W, pcaNet.M, bigY(:,1), pcaNet.changeThresh);
+    
+    [maxx, thisCluster] = max(c);
+    
+    if maxx > 0
+        pcaNet.clusters = thisCluster;
+        pcaNet.bigC(thisCluster, 1) = c(thisCluster);
+    else
+        pcaNet.clusters = 0;
+    end
+    
+    cT = zeros(size(c));
+    cT(thisCluster) = c(thisCluster); % c at timestep t
+    y = bigY(:,1);
+    
+    rng(seed);
+    
+    % Mismatch Setup
+    switch yE_type
+        case 'rand'
+            mmNet.we_yn = (rand(nCells_Ny, nCells_y))./nCells_y;
+        case 'randnorm'
+            mmNet.we_yn = (randn(nCells_Ny, nCells_y))./nCells_y;
+    end
+    
+    switch yI_type
+        case 'rand'
+            mmNet.wi_yn = (rand(nCells_Nc, nCells_y))./nCells_y;
+        case 'randnorm'
+            mmNet.wi_yn = (randn(nCells_Nc, nCells_y))./nCells_y;
+    end
+    
+    switch yR_type
+        case 'rand'
+            mmNet.r_yn = rand(nCells_y);
+        case 'direct'
+            mmNet.r_yn = eye(nCells_y);
+    end
+    
+    switch cE_type
+        case 'rand'
+            mmNet.we_cn = (rand(nCells_Nc, nCells_c));%./nCells_c;
+            %
+        case 'randnorm'
+            mmNet.we_cn = (randn(nCells_Nc, nCells_c));%./nCells_c;
+    end
+    
+    switch cI_type
+        case 'rand'
+            mmNet.wi_cn = (rand(nCells_Ny, nCells_c));%./nCells_c;
+            %
+        case 'randnorm'
+            mmNet.wi_cn = (randn(nCells_Ny, nCells_c));%./nCells_c;
+    end
+    
+    switch cR_type
+        case 'rand'
+            mmNet.r_cn = rand(nCells_c);
+        case 'direct'
+            mmNet.r_cn = eye(nCells_c);
+    end
+    
+    timesteps = length(bigY(1,:));
+    
+    if trackVars == 1
+        mmNet.Vs_y = zeros(nCells_Ny, timesteps);
+        mmNet.Vs_c = zeros(nCells_Nc, timesteps);
+        mmNet.Fs_y = zeros(nCells_Ny, timesteps);
+        mmNet.Fs_c = zeros(nCells_Nc, timesteps);
+        
+        
+        mmNet.yWs_e = zeros(nCells_Ny, nCells_y, timesteps+1);
+        mmNet.cWs_e = zeros(nCells_Nc, nCells_c, timesteps+1);
+        mmNet.yWs_i = zeros(nCells_Nc, nCells_y, timesteps+1);
+        mmNet.cWs_i = zeros(nCells_Ny, nCells_c, timesteps+1);
+        
+        mmNet.wyChanges_e = zeros(nCells_Ny, nCells_y, timesteps);
+        mmNet.wcChanges_e = zeros(nCells_Nc, nCells_c, timesteps);
+        mmNet.wyChanges_i = zeros(nCells_Nc, nCells_y, timesteps);
+        mmNet.wcChanges_i = zeros(nCells_Ny, nCells_c, timesteps);
+        
+        mmNet.yWs_e(:,:,1) = mmNet.we_yn;
+        mmNet.cWs_e(:,:,1) = mmNet.we_cn;
+        mmNet.yWs_i(:,:,1) = mmNet.wi_yn;
+        mmNet.cWs_i(:,:,1) = mmNet.wi_cn;
+    end
+    
+    mmNet.errors_y = zeros(timesteps, 1);
+    mmNet.errors_c = zeros(timesteps, 1);
+    mmNet.allErrors = zeros(timesteps,1);
+    
+    % we've already done 1 iter of pca, so here do 1 iter of mm
+    mmNet = mismatchIter_v2(cT, mmY, 1, mmNet, trackVars);
+    
+    
+    disp('setup complete')
+    
+    % Run through algorithm (make functions for PCAIter and MNIter)
+    for ts_idx = 2:iterations
+        pcaNet = pcaIter_v6(bigY, ts_idx, pcaNet);
+        
+        
+        pcaC = pcaNet.bigC(:, ts_idx);
+        
+        pcaC = pcaC>eps;
+        
+        mmNet = mismatchIter_v2(pcaC, mmY, ts_idx, mmNet, trackVars);
+        
+        sigmaNode = mmNet.allErrors(ts_idx);
+        if sigmaNode > sigmaThresh
+            learningSig = learningSig + deltaLearn;
+            if learningSig > 1
+                learningSig = 1;
+            end
+        else
+            learningSig = learningSig - deltaLearn;
+            if learningSig < 0
+                learningSig = 0;
+            end
+        end
+        
+        pcaNet.learning = 1;
+        
+        
+
+    end
+    
+    % crude 'tau' proxies: the one-step error drop at t = 1, 501, and 1001
+    down1 = mmNet.allErrors(1:2);
+    down2 = mmNet.allErrors(501:502);
+    down3 = mmNet.allErrors(1001:1002);
+    
+    decay1 = (down1(1) - down1(end));
+    decay2 = (down2(1) - down2(end));
+    decay3 = (down3(1) - down3(end));
+    
+    mean_decay = (decay1+decay2+decay3)/3;
+    taus_3 = [taus_3; decay3/down3(1)];
+    avg_taus = [avg_taus; mean_decay];
+    
+%     colors = {'r.', 'y.', 'c.', 'g.', 'm.', 'k.', 'b.'};
+%     for cIdx = 1:7
+%         thisClust = find(pcaNet.clusters == cIdx);
+%         figure(5);
+%         plot(bigY(1, thisClust), bigY(2, thisClust), colors{cIdx})
+%         %         if ts_idx == 2
+%         hold on;
+%         %         end
+%     end
+  figure(888); plot(mmNet.allErrors, 'k-'); xlim([995, 1025]); hold on;
+  plot(1001:1002,mmNet.allErrors(1001:1002),'b*'); hold off;
+  
+  figure(999); plot(mm_etas(1:mIdx), taus_3(1:mIdx)); makepretty; xlim([0,max(mm_etas)]); pause(eps);
+  %     close(5);
+end
+
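
An illustrative aside (not part of the file): taus_3 records the fractional one-step drop in error right after the input switch at t = 1001.

down3 = [8; 2];                              % hypothetical allErrors(1001:1002)
disp((down3(1) - down3(end)) / down3(1))     % -> 0.75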

+ 289 - 0
Agnotron-model/Clustering Network/pcaPlusMN_2D_v4.m

@@ -0,0 +1,289 @@
+%%% pca + mismatch network
+
+% y comes into pca network, which runs one iter. (immediately clusters) 
+% output of pca network is unary context signal
+% y and context signal are then fed into mismatch network
+
+% pca network starts with relatively low excitability, and excitability
+% increases every time total mismatch is over some threshold
+
+
+%%% Important notes:
+%%%     -this specific implementation could fail if we're unlucky with the
+%%%     initial directions of the weights (i.e. if no weight is close enough
+%%%     to input vector, we'd get into trouble)
+%%%     -this version has been edited to work with very high dim inputs!
+%%%     -v2 tries to introduce normalizations to mm weights
+%%%
+%%%
+
+%%% Things to check!
+%%%     -Mismatch error should be decreasing over time, although that may
+%%%     take a while
+%%%     -Rank of W should initially be close to 100 (or whatever
+%%%     initialized), but should end up close to number of clusters
+%%%     -Weight magnitudes (for W and M) shouldn't be too crazy
+%%%     -Cluster IDs should be consistent - after running network once,
+%%%     re-running on same points should give same clusters (unless network
+%%%     takes very long to converge)
+%%%     -Mismatch error should be much lower if same data run on trained network
+%%%     -Context signal should be relatively sparse!
+
+clear 
+close all
+
+h1 = figure(1);
+set(h1, 'Position', [2209         -31         512       384])
+h2 = figure(5);
+set(h2, 'Position', [1370        -319         822         672])
+% figure(10)
+% figure(25)
+pause
+%% Configs
+seed = 12345; %10 was working for many of these demos %56; %23, 10 for 100D 10 clusters
+    % seed choice affects initial weight direction, which affects peaks of
+    % error plots
+%seed = 1123;
+trackVars = 1; %turning on slows things WAY DOWN
+
+
+% Select Dataset
+%load('genData1.mat')
+% load('genData2D_4c_200ppc.mat')
+load('genData2D_3c_500ppc.mat')
+% load('genData100D_10.mat')
+% load('genData_realSpec_try2.mat')
+% load('genData_realSpec_5c.mat')
+
+
+% Define how data will be presented
+twoClust_dm_unshuff = allPts_dm_unshuff(1:100, :);
+twoClust_unshuff = allPts_unshuff(1:100, :);
+
+shuffIdx = 1:100;%randperm(500);
+
+part1_dm = twoClust_dm_unshuff(shuffIdx,:);
+part1 = twoClust_unshuff(shuffIdx, :);
+
+% bigY is input fed into clustering network - note that it's de-meaned
+bigY = [allPts_dm_unshuff']; %[part1_dm' allPts_dm'];%_unshuff'];% allPts_dm' allPts_dm'];%(1:500,:)'];% allPts_dm'];
+
+% mmY is input fed into mismatch network - just a non-de-meaned version of
+% bigY
+mmY = [allPts_unshuff']; %[part1' allPts'];%_unshuff'];% allPts' allPts'];%(1:500,:)'];%  allPts'];
+
+% PCA CONFIGS
+pcaNet.changeThresh = 1e-4; % for output convergence
+initWeightMag = 0.1; %0.8 for some demos % should be 0.1
+pcaNet.capW = 10;%500; %max weight to each output cell -- CHANGING THIS DIDN'T MAKE A DIFFERENCE
+pcaNet.inhibCap = 10; %NEED 10 FOR SYNTHETIC, 25 FOR REAL... -- CHANGING THIS DOES MATTER
+
+pcaNet.etaW = 0.05; %originally each was 0.5 -- MAKING THIS TOO HIGH DOESN'T MATTER MUCH
+pcaNet.etaM = 0.05; % originally 0.5 -- CHANGING THIS DOESN'T MATTER MUCH
+pcaNet.maxM = 1; 
+pcaNet.maxW = 1;
+
+% MISMATCH CONFIGS
+mmNet.eta = 0.025; % CHANGING THIS DRASTICALLY AFFECTS SHAPE OF ERROR PLOTS
+mmNet.thresh = 0.05; % CHANGING THIS AFFECTS SHAPE OF ERROR PLOTS
+
+% MISMATCH STRUCTURE CONFIGS
+mmNet.signed_synapses = 1; %force positive weight coeffs 
+mmNet.c_plastic = 1; % 
+mmNet.y_plastic = 0;
+
+% MISMATCH ARCHITECTURE CONFIGS
+yE_type = 'rand'; %'rand' or 'randnorm'
+cE_type = 'rand';  
+yI_type = 'rand';
+cI_type = 'rand';
+yR_type = 'direct'; %'rand' or 'direct' 
+cR_type = 'direct'; %'rand' or 'direct'
+
+% MISMATCH NETWORK CONFIGS
+nCells_y = size(mmY,1); %one input per input dim
+nCells_c = 10; % (500 works on synthetic) 10 clusters in 100D case (nb 100d is a lot)
+nCells_Ny = 5; %not super sure how high or low this needs to be
+nCells_Nc = 5; %but 100 and 100 should be enough to give us convex cone
+iterations = size(bigY,2);
+
+
+% Total Configs -- this doesn't matter, we're always learning
+% (learningThresh = 0)
+sigmaThresh = 2.5; %1 works for seed 10 %2.5;%2.5;%%0.25;%25; %1 worked well 
+deltaLearn = 0.2; %(0.1) % no buffer -> incorrect clustering (not obvious why needed for shuffled data)
+learningThresh = 0;%1; %make it deltaLearn to basically eliminate deltaLearn %0.5; %always learning -> incorrect clustering
+pcaNet.sigmaThresh = sigmaThresh;
+
+
+pcaNet.learning = 1; %this is set to 0 if mismatch is low, 1 if high
+learningSig = 1;
+rng(seed) 
+%% PCA - Setup
+W_init = initWeightMag*randn(nCells_c, nCells_y);
+M_init = 0*initWeightMag*rand(nCells_c); %initially 0
+
+for idx = 1:nCells_c
+    M_init(idx, idx) = 0; %nrns don't drive selves
+end
+
+D_init = zeros(nCells_c,1);
+pcaNet.bigC = zeros(nCells_c, iterations);
+
+pcaNet.W = W_init;
+pcaNet.M = M_init;
+pcaNet.D = D_init;
+
+c = updateC_v5(...
+    pcaNet.W, pcaNet.M, bigY(:,1), pcaNet.changeThresh);
+
+[maxx, thisCluster] = max(c);
+
+if maxx > 0
+    pcaNet.clusters = thisCluster;
+    pcaNet.bigC(thisCluster, 1) = c(thisCluster);
+else
+    pcaNet.clusters = 0;
+end
+
+cT = zeros(size(c));
+cT(thisCluster) = c(thisCluster); % c at timestep t
+y = bigY(:,1);
+
+rng(seed);
+%% Mismatch Setup
+switch yE_type
+    case 'rand'
+        mmNet.we_yn = (rand(nCells_Ny, nCells_y))./nCells_y;
+    case 'randnorm'
+        mmNet.we_yn = (randn(nCells_Ny, nCells_y))./nCells_y;
+end
+
+switch yI_type
+    case 'rand'
+        mmNet.wi_yn = (rand(nCells_Nc, nCells_y))./nCells_y;
+    case 'randnorm'
+        mmNet.wi_yn = (randn(nCells_Nc, nCells_y))./nCells_y;
+end
+
+switch yR_type
+    case 'rand'
+        mmNet.r_yn = rand(nCells_y);
+    case 'direct'
+        mmNet.r_yn = eye(nCells_y);
+end
+    
+switch cE_type
+    case 'rand'
+        mmNet.we_cn = (rand(nCells_Nc, nCells_c));%./nCells_c;
+        %
+    case 'randnorm'
+        mmNet.we_cn = (randn(nCells_Nc, nCells_c));%./nCells_c;
+end
+
+switch cI_type
+    case 'rand'
+        mmNet.wi_cn = (rand(nCells_Ny, nCells_c));%./nCells_c;
+        %
+    case 'randnorm'
+        mmNet.wi_cn = (randn(nCells_Ny, nCells_c));%./nCells_c;
+end
+
+switch cR_type
+    case 'rand'
+        mmNet.r_cn = rand(nCells_c);
+    case 'direct'
+        mmNet.r_cn = eye(nCells_c);
+end
+
+timesteps = length(bigY(1,:));
+
+if trackVars == 1
+    mmNet.Vs_y = zeros(nCells_Ny, timesteps);
+    mmNet.Vs_c = zeros(nCells_Nc, timesteps);
+    mmNet.Fs_y = zeros(nCells_Ny, timesteps);
+    mmNet.Fs_c = zeros(nCells_Nc, timesteps);
+
+
+    mmNet.yWs_e = zeros(nCells_Ny, nCells_y, timesteps+1);
+    mmNet.cWs_e = zeros(nCells_Nc, nCells_c, timesteps+1);
+    mmNet.yWs_i = zeros(nCells_Nc, nCells_y, timesteps+1);
+    mmNet.cWs_i = zeros(nCells_Ny, nCells_c, timesteps+1);
+
+    mmNet.wyChanges_e = zeros(nCells_Ny, nCells_y, timesteps);
+    mmNet.wcChanges_e = zeros(nCells_Nc, nCells_c, timesteps);
+    mmNet.wyChanges_i = zeros(nCells_Nc, nCells_y, timesteps);
+    mmNet.wcChanges_i = zeros(nCells_Ny, nCells_c, timesteps);
+
+    mmNet.yWs_e(:,:,1) = mmNet.we_yn;
+    mmNet.cWs_e(:,:,1) = mmNet.we_cn;
+    mmNet.yWs_i(:,:,1) = mmNet.wi_yn;
+    mmNet.cWs_i(:,:,1) = mmNet.wi_cn;
+end
+
+mmNet.errors_y = zeros(timesteps, 1);
+mmNet.errors_c = zeros(timesteps, 1);
+mmNet.allErrors = zeros(timesteps,1);
+
+% we've already done 1 iter of pca, so here do 1 iter of mm
+mmNet = mismatchIter_v2(cT, mmY, 1, mmNet, trackVars);  
+
+
+disp('setup complete')
+%% Run through algorithm (make functions for PCAIter and MNIter)
+for ts_idx = 2:iterations
+    pcaNet = pcaIter_v6(bigY, ts_idx, pcaNet);
+
+    
+    pcaC = pcaNet.bigC(:, ts_idx);
+
+    pcaC = pcaC>eps;
+    
+    mmNet = mismatchIter_v2(pcaC, mmY, ts_idx, mmNet, trackVars);
+    
+    sigmaNode = mmNet.allErrors(ts_idx);
+    if sigmaNode > sigmaThresh
+        learningSig = learningSig + deltaLearn;
+        if learningSig > 1
+            learningSig = 1;
+        end
+    else
+        learningSig = learningSig - deltaLearn;
+        if learningSig < 0
+            learningSig = 0;
+        end
+    end
+    
+    if learningSig >= learningThresh
+        pcaNet.learning = 1;
+    else 
+        pcaNet.learning = 0;
+    end
+    
+%     agnoPlotting_2D(bigY, mmNet, pcaNet, ts_idx, iterations)
+%     agnoPlotting_100D(bigY, mmNet, pcaNet, ts_idx, iterations)
+end
+
+agnoPlotting_2D(bigY, mmNet, pcaNet, ts_idx, iterations)
+
+%% Plotting
+% 
+% % Cluster ID Distribution
+% figure(2)
+% subplot(2,2,[1 2])
+% histogram(pcaNet.clusters(1:iterations), [-0.5:1:nCells_c+0.5])
+% title('Inputs per Cluster - Total Dataset')
+% xlabel('Cluster ID')
+% ylabel('Number of Inputs')
+% 
+% subplot(2,2,3)
+% histogram(pcaNet.clusters(1:iterations/2), [-0.5:1:nCells_c+0.5])
+% title('Inputs per Cluster - First Half')
+% xlabel('Cluster ID')
+% ylabel('Number of Inputs')
+% 
+% subplot(2,2,4)
+% histogram(pcaNet.clusters((iterations/2)+1:end), [-0.5:1:nCells_c+0.5])
+% title('Inputs per Cluster - Second Half')
+% xlabel('Cluster ID')
+% ylabel('Number of Inputs')
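
A sketch of the sigma-node gate in the loop above (illustrative numbers, not the script's data): the learning signal integrates +/- deltaLearn per step, is clipped to [0,1], and learning stays on while it is at least learningThresh.

deltaLearn = 0.2; learningThresh = 0; sigmaThresh = 2.5;
learningSig = 1;
errs = [5 5 0 0 0 0 0 0];            % hypothetical allErrors trace
learning = false(size(errs));
for t = 1:numel(errs)
    if errs(t) > sigmaThresh
        learningSig = min(learningSig + deltaLearn, 1);
    else
        learningSig = max(learningSig - deltaLearn, 0);
    end
    learning(t) = learningSig >= learningThresh;
end
disp(learning)    % all ones: with learningThresh = 0 the gate never closes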

+ 309 - 0
Agnotron-model/Clustering Network/pcaPlusMN_HD_v4.m

@@ -0,0 +1,309 @@
+%%% pca + mismatch network
+
+% y comes into pca network, which runs one iter. (immediately clusters) 
+% output of pca network is unary context signal
+% y and context signal are then fed into mismatch network
+
+% pca network starts with relatively low excitability, and excitability
+% increases every time total mismatch is over some threshold
+
+
+%%% Important notes:
+%%%     -this specific implementation could fail if we're unlucky with the
+%%%     initial directions of the weights (i.e. if no weight is close enough
+%%%     to input vector, we'd get into trouble)
+%%%     -this version has been edited to work with very high dim inputs!
+%%%     -v2 tries to introduce normalizations to mm weights
+%%%
+%%%
+
+%%% Things to check!
+%%%     -Mismatch error should be decreasing over time, although that may
+%%%     take a while
+%%%     -Rank of W should initially be close to 100 (or whatever
+%%%     initialized), but should end up close to number of clusters (maybe)
+%%%     -Weight magnitudes (for W and M) shouldn't be too crazy
+%%%     -Cluster IDs should be consistent - after running network once,
+%%%     re-running on same points should give same clusters (unless network
+%%%     takes very long to converge)
+%%%     -Mismatch error should be much lower if same data run on trained network
+%%%     -Context signal should be relatively sparse!
+clear 
+close all
+
+figure(1)
+% figure(5)
+% figure(10)
+% figure(25)
+pause
+%% Configs
+seed = 1211; %23, 10 for 100D 10 clusters
+trackVars = 0; %turning on slows things WAY DOWN
+
+errorPlotting = 0;
+
+% load('clusteredSong_150D_10.mat') % very bad when settings same as below 
+% load('genData100D_10_big.mat') %- MAYBE WORKS WELL?
+% load('genData100D_10_small.mat') % excellent
+% load('genData100D_25c_100ppc.mat')
+load('genData100D_10c_250ppc.mat')
+% load('genData_realSpec_try2.mat')
+% load('genData_realSpec_5c.mat')
+
+bigY = [allPts_dm'];%_unshuff'];% allPts_dm_unshuff'];% allPts_dm' allPts_dm'];%(1:500,:)'];% allPts_dm'];
+mmY = [allPts'];%_unshuff'];% allPts_unshuff'];% allPts' allPts'];%(1:500,:)'];%  allPts'];
+trueIDs = [0];%[clustIDs];
+
+
+% PCA CONFIGS
+pcaNet.changeThresh = 1e-4; % for output convergence while the pca net is running
+initWeightMag = 0.01;
+pcaNet.capW = 25;%500; %max weight to each output cell
+pcaNet.inhibCap = 25; %NEED 10 FOR SYNTHETIC, 25 FOR REAL...
+
+pcaNet.etaW = 0.01;
+pcaNet.etaM = 0.01;
+pcaNet.maxM = 1; 
+pcaNet.maxW = 1;
+
+% MISMATCH CONFIGS
+mmNet.eta = 0.1;
+mmNet.thresh = 0.05; %below this, mm activity is zero (single cell)
+
+% MISMATCH STRUCTURE CONFIGS
+mmNet.signed_synapses = 1; %force positive weight coeffs 
+mmNet.c_plastic = 1; % 
+mmNet.y_plastic = 0;
+
+% MISMATCH ARCHITECTURE CONFIGS
+yE_type = 'rand'; %'rand' or 'randnorm'
+cE_type = 'rand';  
+yI_type = 'rand';
+cI_type = 'rand';
+yR_type = 'direct'; %'rand' or 'direct' 
+cR_type = 'direct'; %'rand' or 'direct'
+
+% MISMATCH NETWORK CONFIGS
+nCells_y = size(mmY,1); %one input per input dim
+nCells_c = 25; % (500 works on synthetic) 10 clusters in 100D case (nb 100d is a lot) (THIS EQUALS SIZE OF PCA NETWORK)
+nCells_Ny = 100; %not super sure how high or low this needs to be
+nCells_Nc = 100; %but 100 and 100 should be enough to give us convex cone
+iterations = size(bigY,2);
+
+
+% Total Configs
+sigmaThresh = 25;%25; %1 worked well 
+deltaLearn = 0.25; %(0.1) % no buffer -> incorrect clustering
+learningThresh = 0; %always learning -> incorrect clustering
+pcaNet.sigmaThresh = sigmaThresh;
+
+
+% last things - STARTING THESE AT 0 IS ACTUALLY IMPORTANT
+pcaNet.learning = 0; %this is set to 0 if mismatch is low, 1 if high
+learningSig = 0;
+rng(seed) 
+%% PCA - Setup
+W_init = initWeightMag*randn(nCells_c, nCells_y);
+M_init = 0*initWeightMag*rand(nCells_c); %initially 0
+
+for idx = 1:nCells_c
+    M_init(idx, idx) = 0; %nrns don't drive selves
+end
+
+pcaNet.trueIDs = trueIDs;
+D_init = zeros(nCells_c,1);
+pcaNet.bigC = zeros(nCells_c, iterations);
+
+
+pcaNet.W = W_init;
+pcaNet.M = M_init;
+pcaNet.D = D_init;
+
+c = updateC_v5(...
+    pcaNet.W, pcaNet.M, bigY(:,1), pcaNet.changeThresh);
+pcaNet.bigC(:,1) = c;
+
+[maxx, thisCluster] = max(c);
+
+if maxx > 0
+    pcaNet.clusters = thisCluster;
+else
+    pcaNet.clusters = 0;
+end
+
+cT = c; % c at timestep t
+y = bigY(:,1);
+
+rng(seed);
+%% Mismatch Setup
+switch yE_type
+    case 'rand'
+        mmNet.we_yn = (rand(nCells_Ny, nCells_y))./nCells_y;
+    case 'randnorm'
+        mmNet.we_yn = (randn(nCells_Ny, nCells_y))./nCells_y;
+end
+
+switch yI_type
+    case 'rand'
+        mmNet.wi_yn = (rand(nCells_Nc, nCells_y))./nCells_y;
+    case 'randnorm'
+        mmNet.wi_yn = (randn(nCells_Nc, nCells_y))./nCells_y;
+end
+
+switch yR_type
+    case 'rand'
+        mmNet.r_yn = rand(nCells_y);
+    case 'direct'
+        mmNet.r_yn = eye(nCells_y);
+end
+    
+switch cE_type
+    case 'rand'
+        mmNet.we_cn = (rand(nCells_Nc, nCells_c));%./nCells_c;
+        %
+    case 'randnorm'
+        mmNet.we_cn = (randn(nCells_Nc, nCells_c));%./nCells_c;
+end
+
+switch cI_type
+    case 'rand'
+        mmNet.wi_cn = (rand(nCells_Ny, nCells_c));%./nCells_c;
+        %
+    case 'randnorm'
+        mmNet.wi_cn = (randn(nCells_Ny, nCells_c));%./nCells_c;
+end
+
+switch cR_type
+    case 'rand'
+        mmNet.r_cn = rand(nCells_c);
+    case 'direct'
+        mmNet.r_cn = eye(nCells_c);
+end
+
+timesteps = length(bigY(1,:));
+
+if trackVars == 1
+    mmNet.Vs_y = zeros(nCells_Ny, timesteps);
+    mmNet.Vs_c = zeros(nCells_Nc, timesteps);
+    mmNet.Fs_y = zeros(nCells_Ny, timesteps);
+    mmNet.Fs_c = zeros(nCells_Nc, timesteps);
+
+
+    mmNet.yWs_e = zeros(nCells_Ny, nCells_y, timesteps+1);
+    mmNet.cWs_e = zeros(nCells_Nc, nCells_c, timesteps+1);
+    mmNet.yWs_i = zeros(nCells_Nc, nCells_y, timesteps+1);
+    mmNet.cWs_i = zeros(nCells_Ny, nCells_c, timesteps+1);
+
+    mmNet.wyChanges_e = zeros(nCells_Ny, nCells_y, timesteps);
+    mmNet.wcChanges_e = zeros(nCells_Nc, nCells_c, timesteps);
+    mmNet.wyChanges_i = zeros(nCells_Nc, nCells_y, timesteps);
+    mmNet.wcChanges_i = zeros(nCells_Ny, nCells_c, timesteps);
+
+    mmNet.yWs_e(:,:,1) = mmNet.we_yn;
+    mmNet.cWs_e(:,:,1) = mmNet.we_cn;
+    mmNet.yWs_i(:,:,1) = mmNet.wi_yn;
+    mmNet.cWs_i(:,:,1) = mmNet.wi_cn;
+end
+
+mmNet.errors_y = zeros(timesteps, 1);
+mmNet.errors_c = zeros(timesteps, 1);
+mmNet.allErrors = zeros(timesteps,1);
+
+% we've already done 1 iter of pca, so here do 1 iter of mm
+mmNet = mismatchIter_v2(cT, mmY, 1, mmNet, trackVars);  
+
+
+disp('setup complete')
+%% Run through algorithm (make functions for PCAIter and MNIter)
+for ts_idx = 2:iterations
+    pcaNet = pcaIter_v6(bigY, ts_idx, pcaNet);
+
+    
+    pcaC = pcaNet.bigC(:, ts_idx);
+
+    pcaC = pcaC>eps;
+    
+    mmNet = mismatchIter_v2(pcaC, mmY, ts_idx, mmNet, trackVars);
+    
+    sigmaNode = mmNet.allErrors(ts_idx);
+    if sigmaNode > sigmaThresh
+        learningSig = learningSig + deltaLearn;
+        if learningSig > 1
+            learningSig = 1;
+        end
+    else
+        learningSig = learningSig - deltaLearn;
+        if learningSig < 0
+            learningSig = 0;
+        end
+    end
+    
+    if learningSig >= learningThresh
+        pcaNet.learning = 1;
+    else 
+        pcaNet.learning = 0;
+    end
+    
+    if errorPlotting == 1
+        agnoPlotting_100D(bigY, mmNet, pcaNet, ts_idx, iterations)
+    end
+end
+
+%% Plotting
+
+% Cluster ID Distribution
+figure(2)
+subplot(2,2,[1 2])
+histogram(pcaNet.clusters(1:iterations), [-0.5:1:nCells_c+0.5])
+% hold on;
+% rl = refline(0, 250);
+% rl.Color = 'r';
+% rl.LineWidth = 2;
+% xlim([0 500])
+title('Inputs per Cluster - Total Dataset')
+xlabel('Cluster ID')
+ylabel('Number of Inputs')
+% set(gca, 'fontsize', 18)
+% hold off;
+
+subplot(2,2,3)
+histogram(pcaNet.clusters(1:iterations/2), [-0.5:1:nCells_c+0.5])
+title('Inputs per Cluster - First Half')
+xlabel('Cluster ID')
+ylabel('Number of Inputs')
+
+subplot(2,2,4)
+histogram(pcaNet.clusters((iterations/2)+1:end), [-0.5:1:nCells_c+0.5])
+title('Inputs per Cluster - Second Half')
+xlabel('Cluster ID')
+ylabel('Number of Inputs')
+
+
+
+figure(81)
+plot(mmNet.allErrors(1:ts_idx), 'r-');
+% hold on;
+% refline(0, pcaNet.sigmaThresh);
+% hold off;
+title(['Total Error over Time - ts: ' num2str(ts_idx)])
+xlabel('Timestep')
+ylabel('Error')
+xlim([0 iterations])
+ylim([0 1000])
+pause(1e-5)
+
+figure(999)
+silhouette(bigY', pcaNet.clusters);
+
+% Check Cluster Accuracy
+% this is actually not horrible... it would be nice to make it prettier or
+% something
+% toCheck = unique(pcaNet.clusters);
+% 
+% for checkIdx = 1:length(toCheck)
+%     thisCluster = toCheck(checkIdx);
+%     memIDs = find(pcaNet.clusters == thisCluster);
+%     memTruth = trueIDs(memIDs);
+%     figure(1000 + checkIdx);
+%     histogram(memTruth)
+% end

+ 1 - 0
Agnotron-model/Clustering Network/updateC_v5.m

@@ -0,0 +1 @@
+function yT = updateC_v5(wT, mT, xT, changeThresh)
%%%%%
% run this for each timestep to update outputs
% inputs are:
%   wT, weight mat for given timestep
%   mT, lateral weight mat for timestep
%   xT, input for timestep
%   changeThresh - since yT depends on itself, it is updated iteratively;
%       the loop stops once the squared change between passes falls below
%       this threshold (or after 1000 passes)
%
% output is:
%   yT, output vector for given timestep
%
% finally, yT and xT follow the notation of the original Chklovskii paper;
% in the pca+mn framework here, yT corresponds to cT and xT to yT
%%%%%
n = length(xT); %numInputs
m = size(wT,1); %num out
change = 1000;

oldY = zeros(m,1);
yT = oldY;
it = 1;

while (change > changeThresh) && (it < 1000)
%     y0 = yT;
    for i2 = 1:m
        t1 = wT(i2,:)*xT;
        t2 = mT(i2,:)*yT;
        
        oldY(i2) = yT(i2);
        
        if t1 > t2
            yT(i2) = (t1-t2); 
        else
            yT(i2) = 0;
        end
    end
    
    newChange = 0;
    for i2 = 1:m
        thisTerm = (oldY(i2) - yT(i2)).^2;
        newChange = newChange + thisTerm;
    end
    change = newChange; %set some convergence
    
    it = it + 1;
end

% disp(['it: ' num2str(it)])
end
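
A toy check of updateC_v5 (illustrative values): with no lateral weights the fixed point is just the rectified feedforward drive max(0, W*x), reached in two passes.

W = [1 0; 0 1; -1 1];
M = zeros(3);
x = [2; 1];
c = updateC_v5(W, M, x, 1e-6);
disp(c')    % -> 2  1  0  (third unit rectified to zero)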

+ 1 - 0
Agnotron-model/Clustering Network/updateD_v2.m

@@ -0,0 +1 @@
+function dT1 = updateD_v2(dT, yT)
%%%%%
% update D for current timestep based on previous yT and dT
%%%%%
    m = length(yT);

    dT1 = zeros(m,1);
%     for idx = 1:m
    dT1 = dT + yT.^2;
%     end
end

+ 1 - 0
Agnotron-model/Clustering Network/updateM_v4.m

@@ -0,0 +1 @@
+function mT1 = updateM_v4(mT, yT, dT1, inhibCap, etaM, maxM)
%%%%%
% run this for each timestep to update weight mat. for next timestep
% inputs are:
%   mT, lateral weight mat for given timestep
%   yT, outputs for timestep
%   dT1 is memorized activity of neuron up to and including next timestep
%       (ie, updateD must be run before this)
%
% output is:
%   mT1, output lateral weights matrix for next timestep
%%%%%
    m = size(mT,1); %num out

    mT1 = zeros(size(mT));
    for idx = 1:m
        for jdx = 1:m
            if idx ~= jdx
                t1 = mT(idx,jdx); %term 1 of m update 

%                 t2n = yT(idx) * (yT(jdx) - (mT(idx,jdx)*yT(idx))); %num of t2
                
                t2n = etaM* yT(idx) * yT(jdx); % - (mT(idx,jdx)*yT(idx))); %num of t2
                t2d = 1;%dT1(idx); %denom of t2

                t2 = t2n/t2d; %term 2 of m update
                
                

                mT1(idx, jdx) = (t1 + t2);
            else
                mT1(idx, jdx) = 0;
            end
        end
    end

    
    mT1(mT1<=0) = eps; % synapses don't die (negs not okay)
    mT1(mT1>maxM) = maxM; %max weight for single synapse
    
    for idx = 1:m
        for jdx = 1:m
            if idx == jdx %change here for upper triang
                mT1(idx, jdx) = 0; %BUT DIAG STILL 0 
            end
        end
        if inhibCap > 0 %if input cap is 0, no cap 
            mT1(idx,:) = mT1(idx,:) ./ (norm(mT1(idx,:))/inhibCap); %normalize inhib to each
        end
    end
    
    
    if isnan(sum(mT1(:)))
        mT1(isnan(mT1)) = 0;
    end
end
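
A toy check (illustrative values) of the row normalization above: after the Hebbian increment, each row of M is rescaled so its norm equals inhibCap; note the division is unconditional, so rows are scaled up as well as down whenever inhibCap > 0.

M = [0 0.3; 0.4 0];
y = [1; 1];  D = [1; 1];             % D is effectively unused with t2d = 1
M1 = updateM_v4(M, y, D, 10, 0.05, 1);
disp(norm(M1(1,:)))                  % -> 10 (== inhibCap)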

+ 1 - 0
Agnotron-model/Clustering Network/updateW_v5.m

@@ -0,0 +1 @@
+function wT1 = updateW_v5(wT, yT, xT, dT1, etaW, capW, maxW)
%%%%%
% run this for each timestep to update weight mat. for next timestep
% inputs are:
%   wT, weight mat for given timestep
%   yT, outputs for timestep
%   xT, input for timestep
%   dT1 is memorized activity of neuron up to and including next timestep
%       (ie, updateD must be run before this)
%
% output is:
%   wT1, output weights matrix for next timestep
%%%%%

%%% added modulatory term to slow learning...
%%%
%%%

    m = size(wT,1); %num out
    n = size(wT,2); %num in

    wT1 = wT;
    for idx = 1:m
        for jdx = 1:n
            t1 = wT(idx,jdx); %term 1 of w update 

            t2n = etaW* yT(idx) * xT(jdx);% - (wT(idx,jdx)*yT(idx))); %num of t2
            t2d = 1;%dT1(idx) + eps; %denom of t2

            t2 = t2n/t2d; %term 2 of w update %not originally rectified

            wT1(idx, jdx) = t1 + t2;  
        end
    end
   
    
    wT1(wT1==0) = eps; %don't kill synapses (but negs okay)
    wT1(wT1>maxW) = maxW; %max magnitude of input weight
    wT1(wT1< (-1*maxW)) = -1*maxW; %max magnitude of input weight
    
    for idx = 1:m % cap weight to each output
        if norm(wT1(idx, :)) > capW %cap!
            wT1(idx,:) = wT1(idx,:) ./ (norm(wT1(idx,:))/capW);
        end
    end
    
    if isnan(sum(wT1(:)))
        wT1(isnan(wT1)) = eps;
    end
end
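
A toy check (illustrative values) contrasting updateW_v5 with updateM_v4: W rows are rescaled only when their norm exceeds capW, and negative weights are allowed (clamped at -maxW rather than floored at eps).

W = [0.2 -0.1];
W1 = updateW_v5(W, 1, [1; 1], 1, 0.05, 10, 1);
disp(W1)    % -> 0.25  -0.05  (no rescaling; norm is well below capW)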

+ 36 - 0
Agnotron-model/Sequence Learning Network/EXTRA_plot_total_mismatch.m

@@ -0,0 +1,36 @@
+bout_length = winSize/2; % we just happen to know this
+num_bouts = length(total_mm) / bout_length;
+
+
+boutwise_mm = zeros(num_bouts,1);
+
+for bout_idx = 1:num_bouts
+    bout_start = ((bout_idx-1)*bout_length) + 1;
+    bout_end = bout_start + bout_length - 1; % don't hard-code the 20-step bout
+    bout_mm = sum(total_mm(bout_start:bout_end));
+    boutwise_mm(bout_idx) = bout_mm;
+end
+
+c1_boutwise_mm = boutwise_mm(1:2:end);
+c2_boutwise_mm = boutwise_mm(2:2:end);
+
+figure(808);
+subplot(211)
+plot(c1_boutwise_mm)
+hold on;
+plot(196, c1_boutwise_mm(196), 'r*', 'MarkerSize', 12)
+hold off;
+title('Mismatch Response - Song 1')
+ylabel('Response Strength')
+makepretty;
+
+subplot(212)
+plot(c2_boutwise_mm)
+hold on;
+plot(196, c2_boutwise_mm(196), 'r*', 'MarkerSize', 12)
+
+hold off;
+title('Mismatch Response - Song 2')
+ylabel('Response Strength')
+xlabel('Bout number')
+makepretty;
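
An illustrative aside on the interleaved slicing above: bouts alternate song 1 / song 2, so odd indices select song 1 and even indices song 2.

boutwise = (1:6)';              % hypothetical bout sums
disp(boutwise(1:2:end)')        % -> 1 3 5  (song 1 bouts)
disp(boutwise(2:2:end)')        % -> 2 4 6  (song 2 bouts)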

+ 1 - 0
Agnotron-model/Sequence Learning Network/cdIter_v3.m

@@ -0,0 +1 @@
+function [cdNet_new] = cdIter_v3(cdNet_old, ts_idx)

cdNet_new = cdNet_old;

% if ts_idx >= 60
%     disp('')
% end

% get input
if ts_idx > 1
    thisInput = cdNet_old.input(:, ts_idx-1); % this -1 is a REVOLUTION!
else
    thisInput = zeros(size(cdNet_old.input(:,ts_idx)));
end

% resolve activity
ext_FF = cdNet_old.w_PD_CD * thisInput;
rec_FF = cdNet_old.w_CD_CD * cdNet_old.old_cd;

randIn = double(rand(size(cdNet_old.old_cd)) > (1-cdNet_old.pin)); %some small random activ 

% randIn(1:(cdNet_old.nSeed*2)) = 0; % seed neurons can't be randomly activated


y = cdNet_old.old_y + 1/cdNet_old.tau*(-cdNet_old.old_y + cdNet_old.old_cd);
Aadapt = cdNet_old.alpha * y;


inhib_FF = cdNet_old.beta * sum(cdNet_old.old_cd);
extIn = ext_FF + randIn;

netCD = extIn + rec_FF - inhib_FF - Aadapt;

cdNet_new.totalVMat(:, ts_idx) = netCD; % provisional; overwritten below with the inhibition-adjusted V

% if ts_idx == 4
%     disp('')
%     figure; plot(netCD);
% end

% rectify (netCD_X keeps the unrectified copy for the V trace)
netCD_X = netCD;
netCD(netCD<0) = 0;

% recurrent inhib
inhib_Rec = cdNet_old.gamma * sum(netCD);

% binarize activity
thisCDAct = double(netCD - inhib_Rec > 0);
thisCD_V = netCD_X - inhib_Rec;


cdNet_new.totalActMat(:, ts_idx) = thisCDAct;
cdNet_new.old_y = y;

cdNet_new.totalVMat(:, ts_idx) = thisCD_V;
cdNet_new.old_v = thisCD_V;

+ 1 - 0
Agnotron-model/Sequence Learning Network/cdWeight_plotter_v2.m

@@ -0,0 +1 @@
+function cdWeight_plotter_v2(cdNet, ts_idx, plotInd, figNumber)

wX = cdNet.w_CD_CD;
% [~, sortind] = sortrows(wX);

wplot = wX(plotInd,plotInd); % from early (top) to late (bottom)


figure(figNumber);
imagesc(flipud(wplot))
title(['CD Weights - ' num2str(ts_idx)])
xlabel('CD Cell 1')
ylabel('CD Cell 2')
colorbar;

% pause

+ 1 - 0
Agnotron-model/Sequence Learning Network/mmIter_v1.m

@@ -0,0 +1 @@
+function mmNet_new = mmIter_v1(mmNet_old, cdNet, ts_idx)

mmNet_new = mmNet_old;

% get input
thisInput = mmNet_old.input(:, ts_idx);
thisCDAct = cdNet.totalActMat(:, ts_idx);

% calculate MM
thisMM = (mmNet_old.w_PD_MN * thisInput) + (mmNet_old.w_CD_MN * thisCDAct);

nActiv = thisMM;
nActiv(nActiv<mmNet_old.mmThresh) = 0;

totalMM = sum(nActiv(:));

mmNet_new.totalMMMat(:, ts_idx) = thisMM;
mmNet_new.totalSigMat(ts_idx) = totalMM;
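
A toy call (made-up weights and sizes, purely illustrative): when the chain's prediction matches the input, the mismatch sums to zero.

mn.w_PD_MN = [1 0; 0 1];    mn.w_CD_MN = -eye(2);
mn.input = [1; 0];          mn.mmThresh = 0;
mn.totalMMMat = zeros(2,1); mn.totalSigMat = 0;
cd_toy.totalActMat = [1; 0];         % prediction matches input
mn = mmIter_v1(mn, cd_toy, 1);
disp(mn.totalSigMat(1))              % -> 0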

BIN
Agnotron-model/Sequence Learning Network/old/cdIter_v2.m


BIN
Agnotron-model/Sequence Learning Network/old/sequenceCG_v19_wd2Chains.m


BIN
Agnotron-model/Sequence Learning Network/seqCD_plotter_forShow.m


+ 1 - 0
Agnotron-model/Sequence Learning Network/seqCD_plotter_v6.m

@@ -0,0 +1 @@
+function seqCD_plotter_v6(cdNet, mmNet, ts_idx, winSize, plotInd, figNumber)


thisCD = cdNet.totalActMat(:,ts_idx-(winSize-1):ts_idx);
thisTotalMM = mmNet.totalSigMat;

pattIn_mm = mmNet.input(:, ts_idx-(winSize-1):ts_idx);

xplot = thisCD(flipud(plotInd),:); % from early (top) to late (bottom)

figure(figNumber);
subplot(4,1, [1 2])
imagesc(xplot)
title(['Activity - ts: ' num2str(ts_idx-(winSize-1)) '-' num2str(ts_idx)])

% subplot(412)
% imagesc(pattIn_cd)
% title('CD Input')

subplot(413)
plot(thisTotalMM(ts_idx-(winSize-1):ts_idx))
ylim([-0.5 10])
title('Mismatch')

subplot(414)
imagesc(pattIn_mm)
title('MM Input')

BIN
Agnotron-model/Sequence Learning Network/sequenceCG_v20.m


+ 181 - 0
Agnotron-model/Sequence Learning Network/sequenceCG_v20_AA_works.m

@@ -0,0 +1,181 @@
+%% v14 wants two chains for odorants A1 B1, A2 B2
+% very close...
+% i need to 1) create separate variables for 1 and 2, to make the temporal
+% jitter easier and 2) make the temporal jitter more extreme and varied
+
+% now change the learning rule!
+
+clear
+close all
+figure(5)
+set(gca, 'Position', [0.1156    0.1100    0.6889    0.8150])
+figure(10)
+set(gca, 'Position', [0.1300    0.1148    0.7750    0.1312])
+% pause
+
+%%% configs
+
+seed =2509;% 23; %2509; %23;
+
+winSize = 40;
+
+cdNet.nCD = 120;
+cdNet.nMN = 20;
+cdNet.nPD = 4;
+cdNet.gamma = 0.0;  %0 before %  0.045;%045; %0.02 %recurrent inhibition
+cdNet.beta = 0.025;%0.025;15;%0.025;%15; %0.115;  %feed forward inhibition
+cdNet.epsilon = 0.2; %relative strength of heterosynaptic LTD
+cdNet.tau = 30; % 30 %time constant of adaptation
+cdNet.alpha = 20; % 50 % strength of adaptation
+cdNet.max_wCD = 1; %max weight between cd neurons
+cdNet.m = 25;% 25; %10-16 ideal %desired internal synapses per cd nrn
+cdNet.Wmax = cdNet.max_wCD * cdNet.m;
+cdNet.nSeed = 10; % 5 % number of seed neurons per pd output
+cdNet.pin = 0; %.01; 
+
+seedThresh = 0;
+seedDrive = 1;
+
+cdNet.fCoef = 0; 
+cdNet.vCoef = 1;% - cdNet.fCoef;
+
+
+cdNet.eta_cd = 0.25;%15; %0.001; %25;%25;%0.5; %initial learning rate
+deltaM = -0.01;%.025;% -0.05;% -0.5; %change in desired # synapses per mismatch
+
+% m = 25, deltaM = -0.05 is original setting
+
+mmNet.mmThresh = 0; %mismatch below this is treated as 0
+mmNet.sigmaThresh = 3; %total MM above this increases eta (not really)
+mmNet.eta_mm = 0.1; %0.1%0.5; % learning rate for mismatch network
+
+
+rng(seed);
+%%% initialize weight mats
+w0 = 0.0375 .* rand(cdNet.nCD); %0.25*(2*cdNet.Wmax/cdNet.nCD)
+
+cdNet.w_CD_CD = w0;%0.1*rand(cdNet.nCD, cdNet.nCD);
+cdNet.w_PD_CD = zeros(cdNet.nCD, cdNet.nPD);
+cdNet.w_PD_CD(1:cdNet.nSeed, 1) = 1;
+cdNet.w_PD_CD(cdNet.nSeed+1:2*cdNet.nSeed, 2) = 1;
+cdNet.w_PD_CD((2*cdNet.nSeed)+1:3*cdNet.nSeed, 3) = 1;
+cdNet.w_PD_CD((3*cdNet.nSeed)+1:4*cdNet.nSeed, 4) = 1;
+
+mmNet.w_CD_MN = zeros(cdNet.nMN, cdNet.nCD); %start as zeros
+mmNet.w_PD_MN = randn(cdNet.nMN, cdNet.nPD);
+
+
+%%% set input activity
+patternIn = seedThresh*ones(4, 10);
+patternIn(1, 1) = seedDrive;
+patternIn(2, 7) = seedDrive;
+corePatt1 = patternIn;
+
+patternIn = seedThresh*ones(4, 10);
+patternIn(3, 1) = seedDrive;
+patternIn(4, 7) = seedDrive;
+corePatt2 = patternIn;
+
+cdPatt1x1 = [seedThresh*ones(4,1) corePatt1 seedThresh*ones(4,8) corePatt2 seedThresh*ones(4,11)];
+cdPatt1x2 = [seedThresh*ones(4,4) corePatt1 seedThresh*ones(4,7) corePatt2 seedThresh*ones(4,9)];
+cdPatt1x3 = [seedThresh*ones(4,2) corePatt1 seedThresh*ones(4,11) corePatt2 seedThresh*ones(4,7)];
+cdPatt1x4 = [seedThresh*ones(4,3) corePatt1 seedThresh*ones(4,9) corePatt2 seedThresh*ones(4,8)];
+cdPatt1x5 = [seedThresh*ones(4,5) corePatt1 seedThresh*ones(4,7) corePatt2 seedThresh*ones(4,8)];
+
+altCore = seedThresh*ones(4,40);
+altCore(1,3) = seedDrive;
+altCore(4,9) = seedDrive;
+altCore(3,22) = seedDrive;
+
+
+patternIn = zeros(4, 10);
+patternIn(1,1:4) = 1;
+patternIn(2, 7:10) = 1;
+mmCore1 = patternIn;
+
+patternIn = zeros(4,10);
+patternIn(3,1:4) = 1;
+patternIn(4, 7:10) = 1;
+mmCore2 = patternIn;
+
+mmPatt1x1 = [zeros(4,1) mmCore1 zeros(4,8) mmCore2 zeros(4,11)];
+mmPatt1x2 = [zeros(4,4) mmCore1 zeros(4,7) mmCore2 zeros(4,9)];
+mmPatt1x3 = [zeros(4,2) mmCore1 zeros(4,11) mmCore2 zeros(4,7)];
+mmPatt1x4 = [zeros(4,3) mmCore1 zeros(4,9) mmCore2 zeros(4,8)];
+mmPatt1x5 = [zeros(4,5) mmCore1 zeros(4,7) mmCore2 zeros(4,8)];
+
+mmAlt = zeros(4,40);
+mmAlt(1,3:6) = 1;
+mmAlt(4, 9:12) = 1;
+mmAlt(3, 22:25) = 1;
+
+corePattCD = [cdPatt1x1 cdPatt1x2 cdPatt1x3 cdPatt1x4 cdPatt1x5];
+corePattMM = [mmPatt1x1 mmPatt1x2 mmPatt1x3 mmPatt1x4 mmPatt1x5];
+
+cdNet.input = [repmat(corePattCD, [1 39]) repmat(altCore, [1 5])];
+mmNet.input = [repmat(corePattMM, [1 39]) repmat(mmAlt, [1 5])];
+
+iterations = size(cdNet.input, 2);
+%%% final prep for iterations
+
+cdNet.old_cd = zeros(cdNet.nCD, 1);
+cdNet.totalVMat = zeros(cdNet.nCD, iterations);
+cdNet.old_v = zeros(cdNet.nCD, 1);
+cdNet.old_y = zeros(cdNet.nCD, 1);
+cdNet.totalActMat = zeros(cdNet.nCD, iterations);
+mmNet.totalMMMat = zeros(cdNet.nMN, iterations);
+mmNet.totalSigMat = zeros(iterations, 1);
+
+mmHist = zeros(1, winSize);
+
+
+
+inDiff = diff(mmNet.input, 1, 2);
+
+mmNet.onInd1 = find(inDiff(1,:) == 1);
+mmNet.onInd2 = find(inDiff(3,:) == 1);
+
+
+% start the fun!
+for ts_idx = 1:iterations
+    
+    cdNet = cdIter_v3(cdNet, ts_idx);
+    
+    mmNet = mmIter_v1(mmNet, cdNet, ts_idx);
+    
+    mmHist = [mmHist(1:winSize-1) mmNet.totalSigMat(ts_idx)];
+    
+    thisMM = sum(mmHist(:));
+    if thisMM >= mmNet.sigmaThresh
+        cdNet.m = cdNet.m + deltaM; %desired internal synapses per cd nrn
+        cdNet.Wmax = cdNet.max_wCD * cdNet.m;
+        if cdNet.m < 1
+            cdNet.m = 1;
+            cdNet.Wmax = cdNet.max_wCD * cdNet.m;
+        end
+    end
+    
+    [cdNet, mmNet] = updateWs_v2(cdNet, mmNet, ts_idx);
+    
+    
+
+    % plotting?
+    if ts_idx == 1
+        plotInd = flipud([1;2;3;4;5;30;34;44;57;89;27;79;87;88;120;26;69;83;96;32;47;80;21;25;6;7;8;9;10;49;73;85;107;35;98;100;39;55;70;106;28;60;78;45;101;118;11;12;13;14;15;24;56;90;42;63;119;68;113;86;108;58;112;16;17;18;19;20;36;50;71;95;48;102;116;33;53;111;23;104;67;82;22;29;31;37;38;40;41;43;46;51;52;54;59;61;62;64;65;66;72;74;75;76;77;81;84;91;92;93;94;97;99;103;105;109;110;114;115;117]);
+    end
+    
+    if ts_idx > 0
+        if (ts_idx < winSize*5) || (mod(ts_idx, winSize) == 0)
+            cdWeight_plotter_v2(cdNet, ts_idx, plotInd, 5)
+            pause(eps)
+            
+            if mod(ts_idx, winSize) == 0%if  ts_idx >= 15
+                seqCD_plotter_v6(cdNet, mmNet, ts_idx, winSize, plotInd, 10); %fig10 is mismatch and activity
+%                 seqCD_plotter_forShow(cdNet, mmNet, ts_idx, winSize, plotInd, 10); %fig10 is mismatch and activity
+                disp(cdNet.m)
+                pause(eps)
+            end
+        end
+    end
+
+end

BIN
Agnotron-model/Sequence Learning Network/sequenceCG_v20_inputPlay.m


BIN
Agnotron-model/Sequence Learning Network/sortByAct.m


BIN
Agnotron-model/Sequence Learning Network/sortbyCorr.m


+ 1 - 0
Agnotron-model/Sequence Learning Network/updateWs_v2.m

@@ -0,0 +1 @@
+function [cdNet_new, mmNet_new] = updateWs_v2(cdNet_old, mmNet_old, ts_idx)

%%%%% actually, i think that we could split this and fold the cd and mm
%%%%% parts into those respective update functions...

cdNet = cdNet_old;
mmNet = mmNet_old;


thisV = cdNet.totalVMat(:, ts_idx);

if ts_idx == 1
    old_V = zeros(size(thisV));
else
    old_V = cdNet.totalVMat(:, ts_idx-1);
end

% get cd and mm activity
thisCD = cdNet.totalActMat(:, ts_idx);
thisMM = mmNet.totalMMMat(:, ts_idx); %this corresponds to V (un-rectified)

old_cd = cdNet.old_cd;

% get eta's
eta_cd = cdNet.eta_cd;
eta_mm = mmNet.eta_mm;

%%% internal cdNet weights
% stdp from fiete et al 2010
thisV(thisV<0) = 0;
old_V(old_V<0) = 0;

dw_cdcd_s = eta_cd .* (thisCD*old_cd' - old_cd*thisCD'); 
dw_cdcd_v = eta_cd .* ((thisV*old_cd') - (old_V*thisCD'));

% linear combination of the two learning rules; only one is active in
% practice (either vCoef or fCoef is set to 0)
dw_cdcd = cdNet.vCoef .* (dw_cdcd_v) + cdNet.fCoef .* (dw_cdcd_s);

% heterosynaptic penalty
dw_hLTDpre = ...
    eta_cd*ones(cdNet.nCD,1)*max(0, sum(cdNet.w_CD_CD+dw_cdcd,1)-cdNet.Wmax);  % weights leaving cells (pre)

dw_hLTDpost = ...
    eta_cd*max(0, sum(cdNet.w_CD_CD+dw_cdcd,2)-cdNet.Wmax)*ones(1,cdNet.nCD);  % weights onto cells (post)

% mismatch weight change
dw_cdmn = -1*eta_mm .* (thisCD * thisMM')'; %weight change always opposes direction of mismatch, ignoring syn signs


%%% actually change weights
if eta_cd > 0
    dw_cdcd_total = dw_cdcd - cdNet.epsilon*(dw_hLTDpre+dw_hLTDpost);
    w_CD_CD = cdNet.w_CD_CD + dw_cdcd_total;
    w_CD_CD(w_CD_CD > cdNet.max_wCD) = cdNet.max_wCD;
    w_CD_CD(w_CD_CD < 0) = 0; % cd nets not negative
    
    w_CD_CD = w_CD_CD .* (~eye(cdNet.nCD));
else
    w_CD_CD = cdNet.w_CD_CD;
end

cdNet.w_CD_CD = w_CD_CD;
mmNet.w_CD_MN = mmNet.w_CD_MN + dw_cdmn;

cdNet_new = cdNet;
mmNet_new = mmNet;
    
cdNet_new.old_cd = thisCD;
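
A toy check of the temporally asymmetric rule above (this mirrors the spike-based dw_cdcd_s term; the script itself runs with vCoef = 1, i.e. the V-based variant): a cell active at t-1 followed by a cell active at t gains the forward weight and loses the reverse one.

eta = 0.25;
old_cd = [1; 0];  thisCD = [0; 1];   % cell 1 fires, then cell 2
dw = eta * (thisCD*old_cd' - old_cd*thisCD');
disp(dw)    % dw(2,1) = +0.25 (1 -> 2 potentiated), dw(1,2) = -0.25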