# convert_to_nix.py
"""
This script loads a complete session in the blackrock format and converts it to a single nix file
"""
import os
import copy  # NOTE(review): only referenced by commented-out code below — candidate for removal
import numpy as np
import quantities as pq
import neo
from neo.test.tools import (assert_same_sub_schema,
                            assert_same_annotations,
                            assert_same_array_annotations)
from elephant.signal_processing import butter
from reachgraspio import reachgraspio

# Choose which session you want to convert into a nix file
# session = "i140703-001"
session = "l101210-001"

# Input data. i.e., original Blackrock files and odML
dataset_dir = '../datasets_blackrock'
session_path = f'{dataset_dir}/{session}'
odml_dir = os.path.join('..', 'datasets_blackrock')

# Output for the nix files
nix_dataset_dir = '../datasets_nix'
nix_session_path = f'{nix_dataset_dir}/{session}'

##### LOAD BLACKROCK FILES ############
# NOTE(review): `session` is rebound here from the session-name string to the
# ReachGraspIO object; the string is no longer needed past this point.
session = reachgraspio.ReachGraspIO(
    filename=session_path,
    odml_directory=odml_dir,
    verbose=False)
# Load the full session into a Neo Block, including spike waveforms.
block = session.read_block(lazy=False, load_waveforms=True)
# =============================================================================
# Create offline filtered LFP
#
# Here, we construct one offline filtered LFP from each ns5 (monkey L) or ns6
# (monkey N) raw recording trace. For monkey N, this filtered LFP can be
# compared to the LFPs in the ns2 file (note that monkey L contains only
# behavioral signals in the ns2 file). Also, we assign telling names to each
# Neo AnalogSignal, which is used for plotting later on in this script.
# =============================================================================
# NOTE(review): this mapping is not referenced anywhere in the visible file —
# presumably left over from a plotting variant of this script; verify before removing.
nsx_to_anasig_name = {2: 'LFP signal (online filtered)',
                      5: 'raw signal',
                      6: 'raw signal'}

# this factor was heuristically determined as an approximate shift introduced
# by the online filtering. Here, the offline filtering does not introduce a
# noticable shift, so we set it to zero.
time_shift_factor = 0*pq.ms

# Will hold the ns2 online-filtered LFP and the ns5/ns6 raw trace, if present.
filtered_anasig = None
raw_anasig = None

# identify neuronal signals and provide labels for plotting
for anasig in block.segments[0].analogsignals:
    # skip non-neuronal signals
    if not anasig.annotations['neural_signal']:
        continue
    # identify nsx source of signals in this AnalogSignal object; the nsx id
    # may live either in the scalar annotations or per-channel array annotations
    if 'nsx' in anasig.annotations:
        nsx = anasig.annotations['nsx']
    else:
        nsx = np.unique(anasig.array_annotations['nsx'])
        # a single AnalogSignal must come from exactly one nsx file
        assert len(nsx) == 1, 'Different nsx sources in AnalogSignal'
        nsx = nsx[0]
    if nsx == 2:
        # AnalogSignal is LFP from ns2
        filtered_anasig = anasig
    elif nsx in [5, 6]:
        # AnalogSignal is raw signal from ns5 or ns6
        raw_anasig = anasig
# Only compute an offline LFP when the session has no online-filtered ns2 LFP
# (i.e., monkey L sessions).
if filtered_anasig is None:
    print("Filtering raw time series to obtain LFP")
    for f in range(raw_anasig.shape[1]):
        # filtering must be done channel by channel for memory reasons (requires approx. 32 GB RAM)
        print(f"Processing channel {f}")
        # 4th-order low-pass (250 Hz) zero-phase filter of one raw channel
        filtered_signal = butter(
            raw_anasig[:, f],
            highpass_freq=None,
            lowpass_freq=250 * pq.Hz,
            filter_function='sosfiltfilt',
            order=4)
        # For other filters that may introduce a time shift, here would be the
        # place to correct for this shift using:
        # ... .time_shift(time_shift_factor)
        # Downsampling 30-fold here to get to 1000Hz from 30kHz
        downsampled_signal = filtered_signal.downsample(30)
        # first run? Create a new Analogsignal pre-sized for all channels
        if f == 0:
            offline_filtered_anasig = neo.AnalogSignal(
                np.zeros((downsampled_signal.shape[0], raw_anasig.shape[1])) *
                downsampled_signal.units,
                t_start=downsampled_signal.t_start,
                sampling_rate=downsampled_signal.sampling_rate)
            # add missing annotations (decision not to put nsx2, since this
            # signal is not in the original files)
            offline_filtered_anasig.annotate(
                neural_signal=True,
                filter_shift_correction=time_shift_factor,
                nsx=-1,
                stream_id='',
            )
            # all array annotations of the raw signal also apply to the filtered
            # signal
            # offline_filtered_anasig.array_annotations = copy.copy(
            #     raw_anasig.array_annotations)
            offline_filtered_anasig.array_annotate(
                **raw_anasig.array_annotations)
        # write this channel's filtered+downsampled trace into its column
        offline_filtered_anasig[:, f] = downsampled_signal
    # NOTE(review): this `nsx` value is never used afterwards, and the name
    # below is an f-string without placeholders — presumably `nsx` was once
    # part of the signal name; confirm intent or remove this lookup.
    if 'nsx' in anasig.annotations:
        nsx = anasig.annotations['nsx']
    else:
        nsx = anasig.array_annotations["nsx"][0]
    offline_filtered_anasig.name = f"NeuralTimeSeriesDownsampled"
    offline_filtered_anasig.description = "Downsampled continuous neuronal recordings, where the downsampling was " \
        "performed off-line during post-processing"
    # Attach all offline filtered LFPs to the segment of data
    block.segments[0].analogsignals.insert(0, offline_filtered_anasig)
  113. ##### SAVE NIX FILE ###################
  114. nix_filename = nix_session_path + '.nix'
  115. if os.path.exists(nix_filename):
  116. print('Nix file already exists and will not be overwritten.')
  117. else:
  118. with neo.NixIO(nix_filename) as io:
  119. print(f'Saving nix file at {nix_filename}')
  120. io.write_block(block)
  121. ##### VALIDATION OF FILE CONTENT ######
  122. with neo.NixIO(nix_filename, mode='ro') as io:
  123. blocks = io.read_all_blocks()
  124. assert len(blocks) == 1
  125. block_new = blocks[0]
  126. for seg_old, seg_new in zip(block.segments, block_new.segments):
  127. for anasig_old, anasig_new in zip(seg_old.analogsignals, seg_new.analogsignals):
  128. # ignoring differences in the file_origin attribute
  129. anasig_old.file_origin = anasig_new.file_origin
  130. assert_same_sub_schema(anasig_old, anasig_new)
  131. assert_same_annotations(anasig_old, anasig_new)
  132. assert_same_array_annotations(anasig_old, anasig_new)
  133. del anasig_old
  134. print(f'AnalogSignals are equivalent.')
  135. for st_old, st_new in zip(seg_old.spiketrains, seg_new.spiketrains):
  136. # ignoring differences in the file_origin attribute
  137. st_old.file_origin = st_new.file_origin
  138. assert_same_sub_schema(st_old, st_new)
  139. assert_same_annotations(st_old, st_new)
  140. assert_same_array_annotations(st_old, st_new)
  141. del st_old
  142. print(f'Spiketrains are equivalent.')
  143. for ev_old, ev_new in zip(seg_old.events, seg_new.events):
  144. # ignoring differences in the file_origin attribute
  145. ev_old.file_origin = ev_new.file_origin
  146. # ignore list-array type changes
  147. if 'color_codes' in ev_old.annotations:
  148. ev_old.annotations['color_codes'] = list(ev_old.annotations['color_codes'])
  149. assert_same_sub_schema(ev_old, ev_new)
  150. assert_same_annotations(ev_old, ev_new)
  151. assert_same_array_annotations(ev_old, ev_new)
  152. del ev_old
  153. print(f'Events are equivalent.')
  154. for ep_old, ep_new in zip(seg_old.epochs, seg_new.epochs):
  155. # ignoring differences in the file_origin attribute
  156. ep_old.file_origin = ep_new.file_origin
  157. assert_same_sub_schema(ep_old, ep_new)
  158. assert_same_annotations(ep_old, ep_new)
  159. assert_same_array_annotations(ep_old, ep_new)
  160. del ep_old
  161. print(f'Epochs are equivalent.')