spade_analysis.py

'''
Script to run the SPADE analysis for Polychronous model
validation (Gutzen et al. 2018)
authors: Pietro Quaglio, Robin Gutzen, Michael von Papen, Guido Trensch,
Sonja Grün, Michael Denker
'''
import neo
import quantities as pq
import elephant.spade as spade
import numpy as np
import os
import argparse
import yaml
# Function to load the spike data written by the C and SpiNNaker simulations
def load(file_path, t_start=0, t_stop=60000, filter_inh=False,
         **kwargs):
    # Each line of the .dat file holds one spike: <seconds> <milliseconds> <neuron id>
    with open(file_path, 'r') as f:
        lines = f.readlines()
    N = 1000  # total number of neurons
    # Read spike times (in ms) per neuron
    spike_times = [[] for _ in range(N)]
    for line in lines:
        t_s, t_ms, n = line.split()
        t = int(t_s) * 1000 + int(t_ms)
        n = int(n)
        if t > t_stop:
            break
        spike_times[n].append(t)
    # Fill neo.SpikeTrain objects; the first 800 neurons are excitatory,
    # the remaining 200 inhibitory
    nbr_neurons = N
    if filter_inh:
        nbr_neurons = 800
    spiketrains = [[] for _ in range(nbr_neurons)]
    for n, st in enumerate(spike_times):
        if n < 800:
            n_type = 'exc'
        else:
            n_type = 'inh'
        if not filter_inh or n_type == 'exc':
            spiketrains[n] = neo.SpikeTrain(np.sort(st), units='ms',
                                            t_start=t_start, t_stop=t_stop,
                                            n_type=n_type)
    return spiketrains
# Functions to create new folders
def mkdirp(directory):
    if not os.path.isdir(directory):
        print(directory)
        os.mkdir(directory)


def split_path(path):
    folders = []
    while True:
        path, folder = os.path.split(path)
        if folder != "":
            folders.append(folder)
        else:
            if path != "":
                folders.append(path)
            break
    folders.reverse()
    return folders
# Parsing the command line arguments
parser = argparse.ArgumentParser(
    description='Running the SPADE analysis for Polychronous model validation')
# The simulator used to generate the data
parser.add_argument('simulator', metavar='simulator', type=str,
                    help='The simulator used to generate the data to analyze')
# The number of simulation hours after which the data are recorded
parser.add_argument('hour', metavar='hour', type=int,
                    help='The number of simulation hours after which the data are recorded')
# Getting the arguments
args = parser.parse_args()
simulator = args.simulator
hour = args.hour
# Load general SPADE parameters
with open("configfile_spade.yaml", 'r') as stream:
    param = yaml.safe_load(stream)
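# For reference, a minimal sketch of what configfile_spade.yaml could contain.
# The keys are the ones read from `param` below; the values are illustrative
# placeholders, not the settings used in the actual analysis:
#
#   binsize: 1            # bin size of the discretization, in ms
#   winlen: 10            # window length (number of bins) of the pattern search
#   min_spikes: 2         # minimum number of spikes per pattern
#   min_occ: 2            # minimum number of pattern occurrences
#   n_surr: 100           # number of surrogates for significance evaluation
#   alpha: 0.05           # significance level of the pattern spectrum test
#   psr_param: [0, 0, 0]  # parameters of the pattern spectrum reduction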
######## Loading the Data ########
# Path to the simulation output of the given simulator
experiment = '../simulation_data/iteration_III/60s_simulation_runs/{}'.format(simulator)
# Path to the data to analyze
simulation = '/out_firings_after{}h'.format(hour)
data_path = './{}'.format(experiment + simulation)
# Loading the .dat file containing the spike data (excitatory neurons only)
spikedata = load(data_path + '.dat', filter_inh=True)

####### Running SPADE analysis #########
results_spade = spade.spade(
    spikedata,
    binsize=param['binsize'] * pq.ms,
    winlen=param['winlen'],
    min_spikes=param['min_spikes'],
    min_neu=param['min_spikes'],
    min_occ=param['min_occ'],
    n_surr=param['n_surr'],
    alpha=param['alpha'],
    psr_param=param['psr_param'],
    output_format='patterns')
####### Saving the results ########
# Stripping quantities from lags and times for Python 2.7 compatibility
patterns = results_spade['patterns']
for patt in patterns:
    patt['lags'] = list(patt['lags'].magnitude)
    patt['times'] = list(patt['times'].magnitude)
# Relative path where to store the results
res_dir = './patterns_results/iteration_III/60s_simulation_runs/{}{}'.format(
    simulator, simulation)
# If not yet existing, create the path to the results folder
path_temp = './'
for folder in split_path(res_dir):
    path_temp = path_temp + '/' + folder
    mkdirp(path_temp)
# Save results
np.save(res_dir + '/patterns.npy', (results_spade, param))
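# Example invocation (hypothetical argument values; 'simulator' must match a
# folder name under ../simulation_data/iteration_III/60s_simulation_runs and
# 'hour' selects the out_firings_after<hour>h.dat file):
#   python spade_analysis.py C 1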