# -*- coding: utf-8 -*-
"""
Class for reading data from Neuralynx files.
This IO supports NCS, NEV, NSE and NTT file formats.

Depends on: numpy

Supported: Read

Author: Julia Sprenger, Carlos Canova
Adapted from the exampleIO of python-neo
"""

# needed for python 3 compatibility
from __future__ import absolute_import, division

import sys
import os
import warnings
import codecs
import copy
import re
import datetime
import pkg_resources

if hasattr(pkg_resources, 'pkg_resources'):
    parse_version = pkg_resources.pkg_resources.parse_version
else:
    parse_version = pkg_resources.parse_version

import numpy as np
import quantities as pq

from neo.io.baseio import BaseIO
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal, SpikeTrain,
                      Event, Unit)
from os import listdir, sep
from os.path import isfile, getsize

import hashlib
import pickle
class NeuralynxIO(BaseIO):
    """
    Class for reading Neuralynx files.

    It enables reading:
    - :class:`Block`
    - :class:`Segment`
    - :class:`AnalogSignal`
    - :class:`SpikeTrain`

    Usage:
        from neo import io
        import quantities as pq
        import matplotlib.pyplot as plt

        session_folder = '../Data/2014-07-24_10-31-02'
        NIO = io.NeuralynxIO(session_folder, print_diagnostic=True)
        block = NIO.read_block(t_starts=0.1 * pq.s, t_stops=0.2 * pq.s,
                               events=True)
        seg = block.segments[0]
        analogsignal = seg.analogsignals[0]
        plt.plot(analogsignal.times.rescale(pq.ms), analogsignal.magnitude)
        plt.show()
    """
    is_readable = True  # This class can only read data
    is_writable = False  # writing is not supported

    # This class is able to directly or indirectly handle the following
    # objects. Note that this greatly simplifies the full Neo object hierarchy.
    supported_objects = [Segment, AnalogSignal, SpikeTrain, Event]

    # This class can return either a Block or a Segment
    # The first one is the default (self.read)
    # These lists should go from highest object to lowest object because
    # common_io_test assumes it.
    readable_objects = [Segment, AnalogSignal, SpikeTrain]

    # This class is not able to write objects
    writeable_objects = []

    has_header = False
    is_streameable = False

    # This is for GUI stuff: a definition of parameters when reading.
    # This dict should be keyed by object (`Block`). Each entry is a list
    # of tuples. The first entry in each tuple is the parameter name. The
    # second entry is a dict with keys 'value' (for the default value)
    # and 'label' (for a descriptive name).
    # Note that if the highest-level object requires parameters,
    # common_io_test will be skipped.
    read_params = {
        Segment: [('waveforms', {'value': True})],
        Block: [('waveforms', {'value': False})]
    }

    # writing is not supported, so no GUI stuff for it
    write_params = None

    name = 'Neuralynx'
    description = 'This IO reads .nse/.ncs/.nev files of the Neuralynx (' \
                  'Cheetah) recording system (tetrodes).'

    extensions = ['nse', 'ncs', 'nev', 'ntt']

    # mode can be 'file', 'dir', 'fake' or 'database'
    # the main case is 'file', but some readers are based on a directory or
    # a database; this info is for GUI stuff as well
    mode = 'dir'

    # hardcoded parameters from the manual, which are not present in
    # Neuralynx data files
    # unit of timestamps in different files
    nev_time_unit = pq.microsecond
    ncs_time_unit = pq.microsecond
    nse_time_unit = pq.microsecond
    ntt_time_unit = pq.microsecond
    # unit of sampling rate in different files
    ncs_sr_unit = pq.Hz
    nse_sr_unit = pq.Hz
    ntt_sr_unit = pq.Hz
    def __init__(self, sessiondir=None, cachedir=None, use_cache='hash',
                 print_diagnostic=False, filename=None):
        """
        Arguments:
            sessiondir: the directory in which the files of the recording
                        session are collected. Default 'None'.
            print_diagnostic: indicates whether information about the loading
                        of data is printed to the terminal. Default 'False'.
            cachedir: the directory where metadata about the recording
                        session is read from and written to.
            use_cache: method used for cache identification. Possible values:
                        'hash'/'always'/'datesize'/'never'. Default 'hash'.
            filename: this argument is handled the same way as sessiondir and
                        is only added for external IO interfaces. The value of
                        sessiondir has priority over filename.
        """
        BaseIO.__init__(self)

        # possibility to provide filename instead of sessiondir for IO
        # compatibility
        if filename is not None and sessiondir is None:
            sessiondir = filename

        if sessiondir is None:
            raise ValueError('Must provide a directory containing data files '
                             'of one recording session.')

        # remove filename if a specific file was passed
        if any([sessiondir.endswith('.%s' % ext) for ext in self.extensions]):
            sessiondir = sessiondir[:sessiondir.rfind(sep)]

        # remove trailing separator for consistent directory handling
        if sessiondir.endswith(sep):
            sessiondir = sessiondir.rstrip(sep)

        # set general parameters of this IO
        self.sessiondir = sessiondir
        self.filename = sessiondir.split(sep)[-1]
        self._print_diagnostic = print_diagnostic
        self.associated = False
        self._associate(cachedir=cachedir, usecache=use_cache)

        self._diagnostic_print(
            'Initialized IO for session %s' % self.sessiondir)
    def read_block(self, lazy=False, cascade=True, t_starts=None,
                   t_stops=None, electrode_list=None, unit_list=None,
                   analogsignals=True, events=False, waveforms=False):
        """
        Reads data in the requested time window and returns a Block with as
        many Segments as necessary to contain these data.

        Arguments:
            lazy : Postpone actual reading of the data files. Default 'False'.
            cascade : Do not postpone reading subsequent neo types (segments).
                            Default 'True'.
            t_starts : list of quantities or quantity describing the start of
                            the requested time window to load. If None or
                            [None] the complete session is loaded.
                            Default 'None'.
            t_stops : list of quantities or quantity describing the end of the
                            requested time window to load. Has to contain the
                            same number of values as t_starts. If None or
                            [None] the complete session is loaded.
                            Default 'None'.
            electrode_list : list of integers containing the IDs of the
                            requested channels to load. If [] or None all
                            available channels will be loaded.
                            Default: None.
            unit_list : list of integers containing the IDs of the requested
                            units to load. If [] or None all available units
                            will be loaded.
                            Default: None.
            analogsignals : boolean, indicating whether analogsignals should
                            be read. Default: True.
            events : Loading events. If True all available events in the given
                            time window will be read. Default: False.
            waveforms : Load waveforms for spikes in the requested time
                            window. Default: False.

        Returns: Block object containing the requested data in neo structures.

        Usage:
            from neo import io
            import quantities as pq
            import matplotlib.pyplot as plt

            session_folder = '../Data/2014-07-24_10-31-02'
            NIO = io.NeuralynxIO(session_folder, print_diagnostic=True)
            block = NIO.read_block(lazy=False, cascade=True,
                                   t_starts=0.1 * pq.s, t_stops=0.2 * pq.s,
                                   electrode_list=[1, 5, 10],
                                   unit_list=[1, 2, 3],
                                   events=True, waveforms=True)
            plt.plot(block.segments[0].analogsignals[0])
            plt.show()
        """
        # Create block
        bl = Block(file_origin=self.sessiondir)
        bl.name = self.filename
        if not cascade:
            return bl

        # Checking input of t_start and t_stop
        # For lazy users that specify x, x instead of [x], [x] for t_starts,
        # t_stops
        if t_starts is None:
            t_starts = [None]
        elif type(t_starts) == pq.Quantity:
            t_starts = [t_starts]
        elif type(t_starts) != list or any(
                [(type(i) != pq.Quantity and i is not None)
                 for i in t_starts]):
            raise ValueError('Invalid specification of t_starts.')
        if t_stops is None:
            t_stops = [None]
        elif type(t_stops) == pq.Quantity:
            t_stops = [t_stops]
        elif type(t_stops) != list or any(
                [(type(i) != pq.Quantity and i is not None)
                 for i in t_stops]):
            raise ValueError('Invalid specification of t_stops.')

        # adapting t_starts and t_stops to known gap times (extracted in
        # association process / initialization)
        for gap in self.parameters_global['gaps']:
            for e in range(len(t_starts)):
                t1, t2 = t_starts[e], t_stops[e]
                gap_start = gap[1] * self.ncs_time_unit - \
                    self.parameters_global['t_start']
                gap_stop = gap[2] * self.ncs_time_unit - \
                    self.parameters_global['t_start']
                if ((t1 is None and t2 is None)
                        or (t1 is None and t2 is not None and t2.rescale(
                            self.ncs_time_unit) > gap_stop)
                        or (t2 is None and t1 is not None and t1.rescale(
                            self.ncs_time_unit) < gap_stop)
                        or (t1 is not None and t2 is not None and t1.rescale(
                            self.ncs_time_unit) < gap_start
                            and t2.rescale(self.ncs_time_unit) > gap_stop)):
                    # adapting first time segment
                    t_stops[e] = gap_start
                    # inserting second time segment
                    t_starts.insert(e + 1, gap_stop)
                    t_stops.insert(e + 1, t2)
                    warnings.warn(
                        'Substituted t_starts and t_stops in order to skip '
                        'gap in recording session.')

        # loading all channels if electrode_list is empty or None
        if electrode_list == [] or electrode_list is None:
            electrode_list = self.parameters_ncs.keys()

        # adding a segment for each t_start, t_stop pair
        for t_start, t_stop in zip(t_starts, t_stops):
            seg = self.read_segment(lazy=lazy, cascade=cascade,
                                    t_start=t_start, t_stop=t_stop,
                                    electrode_list=electrode_list,
                                    unit_list=unit_list,
                                    analogsignals=analogsignals,
                                    events=events,
                                    waveforms=waveforms)
            bl.segments.append(seg)

        # generate units
        units = []
        channel_unit_collection = {}
        for st in [s for seg in bl.segments for s in seg.spiketrains]:
            # collecting spiketrains of the same channel and unit id to
            # generate a common unit
            chuid = (st.annotations['channel_index'],
                     st.annotations['unit_id'])
            if chuid in channel_unit_collection:
                channel_unit_collection[chuid].append(st)
            else:
                channel_unit_collection[chuid] = [st]
        for chuid in channel_unit_collection:
            sts = channel_unit_collection[chuid]
            unit = Unit(name='Channel %i, Unit %i' % chuid)
            unit.spiketrains.extend(sts)
            units.append(unit)

        # generate one channel index for each analogsignal
        for anasig in [a for seg in bl.segments for a in seg.analogsignals]:
            channelids = anasig.annotations['channel_index']
            channel_names = ['channel %i' % i for i in channelids]
            channelidx = ChannelIndex(index=range(len(channelids)),
                                      channel_names=channel_names,
                                      name='channel ids for all analogsignal '
                                           '"%s"' % anasig.name,
                                      channel_ids=channelids)
            channelidx.analogsignals.append(anasig)
            bl.channel_indexes.append(channelidx)

        # generate channel indexes for units
        channelids = [unit.spiketrains[0].annotations['channel_index']
                      for unit in units]
        channel_names = ['channel %i' % i for i in channelids]
        channelidx = ChannelIndex(index=range(len(channelids)),
                                  channel_names=channel_names,
                                  name='channel ids for all spiketrains',
                                  channel_ids=channelids)
        channelidx.units.extend(units)
        bl.channel_indexes.append(channelidx)
        bl.create_many_to_one_relationship()

        # Adding global parameters to block annotation
        bl.annotations.update(self.parameters_global)

        return bl
    def read_segment(self, lazy=False, cascade=True, t_start=None,
                     t_stop=None, electrode_list=None, unit_list=None,
                     analogsignals=True, events=False, waveforms=False):
        """Reads one Segment.

        The Segment will contain one AnalogSignal for each channel
        and will go from t_start to t_stop.

        Arguments:
            lazy : Postpone actual reading of the data files. Default 'False'.
            cascade : Do not postpone reading subsequent neo types
                            (SpikeTrains, AnalogSignals, Events).
                            Default 'True'.
            t_start : time (quantity) that the Segment begins. Default None.
            t_stop : time (quantity) that the Segment ends. Default None.
            electrode_list : list of integers containing the IDs of the
                            requested channels to load. If [] or None all
                            available channels will be loaded.
                            Default: None.
            unit_list : list of integers containing the IDs of the requested
                            units to load. If [] or None all available units
                            will be loaded. If False, no unit will be loaded.
                            Default: None.
            analogsignals : boolean, indicating whether analogsignals should
                            be read. Default: True.
            events : Loading events. If True all available events in the given
                            time window will be read. Default: False.
            waveforms : Load waveforms for spikes in the requested time
                            window. Default: False.

        Returns:
            Segment object containing neo objects, which contain the data.
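
        Usage (sketch; the session path and channel ids are placeholders,
        NIO is an initialized NeuralynxIO instance):
            import quantities as pq
            from neo import io

            NIO = io.NeuralynxIO('../Data/2014-07-24_10-31-02',
                                 print_diagnostic=True)
            seg = NIO.read_segment(t_start=0.1 * pq.s, t_stop=0.2 * pq.s,
                                   electrode_list=[1, 5], events=True)
            anasig = seg.analogsignals[0]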
  322. """
        # input check
        # loading all channels if electrode_list is empty or None
        if electrode_list == [] or electrode_list is None:
            electrode_list = self.parameters_ncs.keys()
        elif [v for v in electrode_list if
              v in self.parameters_ncs.keys()] == []:
            # warn if none of the requested channels are present in this
            # session
            warnings.warn('Requested channels %s are not present in session '
                          '(contains only %s)' % (
                              electrode_list, self.parameters_ncs.keys()))
            electrode_list = []

        seg = Segment(file_origin=self.filename)
        if not cascade:
            return seg

        # generate empty segment for analogsignal collection
        empty_seg = Segment(file_origin=self.filename)

        # Reading NCS Files #
        # selecting ncs files to load based on electrode_list requested
        if analogsignals:
            for chid in electrode_list:
                if chid in self.parameters_ncs:
                    file_ncs = self.parameters_ncs[chid]['filename']
                    self.read_ncs(file_ncs, empty_seg, lazy, cascade,
                                  t_start=t_start, t_stop=t_stop)
                else:
                    self._diagnostic_print('Can not load ncs of channel %i. '
                                           'No corresponding ncs file '
                                           'present.' % (chid))

            # supplementary merge function, should be replaced by a neo
            # utility function
            def merge_analogsignals(anasig_list):
                for aid, anasig in enumerate(anasig_list):
                    anasig.channel_index = None
                    if aid == 0:
                        full_analogsignal = anasig
                    else:
                        full_analogsignal = full_analogsignal.merge(anasig)

                for key in anasig_list[0].annotations.keys():
                    listified_values = [a.annotations[key]
                                        for a in anasig_list]
                    full_analogsignal.annotations[key] = listified_values
                return full_analogsignal

            analogsignal = merge_analogsignals(empty_seg.analogsignals)
            seg.analogsignals.append(analogsignal)
            analogsignal.segment = seg

        # Reading NEV Files (Events) #
        # reading all files available
        if events:
            for filename_nev in self.nev_asso:
                self.read_nev(filename_nev, seg, lazy, cascade,
                              t_start=t_start, t_stop=t_stop)

        # Reading Spike Data only if requested
        if unit_list is not False:
            # Reading NSE Files (Spikes) #
            # selecting nse files to load based on electrode_list requested
            for chid in electrode_list:
                if chid in self.parameters_nse:
                    filename_nse = self.parameters_nse[chid]['filename']
                    self.read_nse(filename_nse, seg, lazy, cascade,
                                  t_start=t_start, t_stop=t_stop,
                                  waveforms=waveforms)
                else:
                    self._diagnostic_print('Can not load nse of channel %i. '
                                           'No corresponding nse file '
                                           'present.' % (chid))

            # Reading NTT Files (Spikes) #
            # selecting ntt files to load based on electrode_list requested
            for chid in electrode_list:
                if chid in self.parameters_ntt:
                    filename_ntt = self.parameters_ntt[chid]['filename']
                    self.read_ntt(filename_ntt, seg, lazy, cascade,
                                  t_start=t_start, t_stop=t_stop,
                                  waveforms=waveforms)
                else:
                    self._diagnostic_print('Can not load ntt of channel %i. '
                                           'No corresponding ntt file '
                                           'present.' % (chid))

        return seg
    def read_ncs(self, filename_ncs, seg, lazy=False, cascade=True,
                 t_start=None, t_stop=None):
        '''
        Reads a single .ncs file from the associated Neuralynx recording
        session.
        In case of a recording gap between t_start and t_stop, data are only
        loaded until the gap start.
        For loading data across recording gaps use read_block(...).

        Arguments:
            filename_ncs : Name of the .ncs file to be loaded.
            seg : Neo Segment, to which the AnalogSignal containing the data
                            will be attached.
            lazy : Postpone actual reading of the data. Instead provide a
                            dummy AnalogSignal. Default 'False'.
            cascade : Not used in this context. Default: 'True'.
            t_start : time or sample (quantity or integer) at which the
                            AnalogSignal begins.
                            Default None.
            t_stop : time or sample (quantity or integer) at which the
                            AnalogSignal ends.
                            Default None.

        Returns:
            None
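
        Usage (sketch; the file name is a placeholder and NIO an initialized
        NeuralynxIO instance):
            import quantities as pq
            from neo.core import Segment

            seg = Segment(file_origin=NIO.filename)
            NIO.read_ncs('CSC1.ncs', seg, t_start=0 * pq.s, t_stop=1 * pq.s)
            anasig = seg.analogsignals[0]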
        '''
        # checking format of filename and correcting if necessary
        if filename_ncs[-4:] != '.ncs':
            filename_ncs = filename_ncs + '.ncs'
        if sep in filename_ncs:
            filename_ncs = filename_ncs.split(sep)[-1]

        # Extracting the channel id from prescan (association) of ncs files
        # with this recording session
        chid = self.get_channel_id_by_file_name(filename_ncs)
        if chid is None:
            raise ValueError('NeuralynxIO is attempting to read a file '
                             'not associated to this session (%s).' % (
                                 filename_ncs))

        if not cascade:
            return

        # read data
        header_time_data = self.__mmap_ncs_packet_timestamps(filename_ncs)
        data = self.__mmap_ncs_data(filename_ncs)

        # ensure meaningful values for requested start and stop times
        # in case time is provided in samples: transform to absolute time
        # units
        if isinstance(t_start, int):
            t_start = t_start / self.parameters_ncs[chid]['sampling_rate']
        if isinstance(t_stop, int):
            t_stop = t_stop / self.parameters_ncs[chid]['sampling_rate']

        # rescaling to global start time of recording (time of first sample
        # in any file type)
        if t_start is None or t_start < (
                self.parameters_ncs[chid]['t_start']
                - self.parameters_global['t_start']):
            t_start = (self.parameters_ncs[chid]['t_start']
                       - self.parameters_global['t_start'])

        if t_start > (self.parameters_ncs[chid]['t_stop']
                      - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.' % (
                    t_start, t_stop,
                    (self.parameters_ncs[chid]['t_stop']
                     - self.parameters_global['t_start']),
                    filename_ncs))

        if t_stop is None or t_stop > (
                self.parameters_ncs[chid]['t_stop']
                - self.parameters_global['t_start']):
            t_stop = (self.parameters_ncs[chid]['t_stop']
                      - self.parameters_global['t_start'])

        if t_stop < (self.parameters_ncs[chid]['t_start']
                     - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.' % (
                    t_start, t_stop,
                    (self.parameters_ncs[chid]['t_start']
                     - self.parameters_global['t_start']),
                    filename_ncs))

        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than / equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_ncs))

        # Extracting data signal in requested time window
        unit = pq.dimensionless  # default value
        if lazy:
            sig = []
            p_id_start = 0
        else:
            tstamps = header_time_data * self.ncs_time_unit - \
                self.parameters_global['t_start']

            # find data packet to start with signal construction
            starts = np.where(tstamps <= t_start)[0]
            if len(starts) == 0:
                self._diagnostic_print(
                    'Requested AnalogSignal not present in this time '
                    'interval.')
                return
            else:
                # first packet to be included into signal
                p_id_start = starts[-1]
            # find data packet where signal ends (due to gap or t_stop)
            stops = np.where(tstamps >= t_stop)[0]
            if len(stops) != 0:
                first_stop = [stops[0]]
            else:
                first_stop = []

            # last packet to be included in signal
            p_id_stop = min(first_stop + [len(data)])

            # search gaps in recording in time range to load
            gap_packets = [gap_id[0] for gap_id in
                           self.parameters_ncs[chid]['gaps'] if
                           gap_id[0] > p_id_start]
            if len(gap_packets) > 0 and min(gap_packets) < p_id_stop:
                p_id_stop = min(gap_packets)
                warnings.warn(
                    'Analogsignalarray was shortened due to gap in recorded '
                    'data of file %s at packet id %i' % (
                        filename_ncs, min(gap_packets)))

            # search broken packets in time range to load
            broken_packets = []
            if 'broken_packet' in self.parameters_ncs[chid]:
                broken_packets = [packet[0] for packet in
                                  self.parameters_ncs[chid]['broken_packet']
                                  if packet[0] > p_id_start]
            if len(broken_packets) > 0 and min(broken_packets) < p_id_stop:
                p_id_stop = min(broken_packets)
                warnings.warn(
                    'Analogsignalarray was shortened due to broken data '
                    'packet in recorded data of file %s at packet id %i' % (
                        filename_ncs, min(broken_packets)))

            # construct signal in valid packet range
            sig = np.array(data[p_id_start:p_id_stop + 1], dtype=float)
            sig = sig.reshape(len(sig) * len(sig[0]))

            # ADBitVolts is not guaranteed to be present in the header!
            if 'ADBitVolts' in self.parameters_ncs[chid]:
                sig *= self.parameters_ncs[chid]['ADBitVolts']
                unit = pq.V
            else:
                warnings.warn(
                    'Could not transform data from file %s into physical '
                    'signal. Missing "ADBitVolts" value in text header.'
                    % filename_ncs)

        # defining sampling rate for rescaling purposes
        sampling_rate = self.parameters_ncs[chid]['sampling_unit'][0]
        # creating neo AnalogSignal containing data
        anasig = AnalogSignal(signal=pq.Quantity(sig, unit, copy=False),
                              sampling_rate=1 * sampling_rate,
                              # rescaling t_start to sampling time units
                              t_start=(header_time_data[p_id_start]
                                       * self.ncs_time_unit
                                       - self.parameters_global[
                                           't_start']).rescale(
                                  1 / sampling_rate),
                              name='channel_%i' % (chid),
                              channel_index=chid)

        # removing protruding parts of first and last data packet
        if anasig.t_start < t_start.rescale(anasig.t_start.units):
            anasig = anasig.time_slice(t_start.rescale(anasig.t_start.units),
                                       None)
        if anasig.t_stop > t_stop.rescale(anasig.t_start.units):
            anasig = anasig.time_slice(None,
                                       t_stop.rescale(anasig.t_start.units))

        annotations = copy.deepcopy(self.parameters_ncs[chid])
        for pop_key in ['sampling_rate', 't_start']:
            if pop_key in annotations:
                annotations.pop(pop_key)
        anasig.annotations.update(annotations)
        anasig.annotations['electrode_id'] = chid
        # this annotation is necessary for automatic generation of
        # recordingchannels
        anasig.annotations['channel_index'] = chid
        anasig.segment = seg  # needed for merge function of analogsignals
        seg.analogsignals.append(anasig)
    def read_nev(self, filename_nev, seg, lazy=False, cascade=True,
                 t_start=None, t_stop=None):
        '''
        Reads the associated nev file and attaches its content as an Event to
        the provided neo segment. In contrast to read_ncs, times can not be
        provided as numbers of samples, since a nev file has no inherent
        sampling rate.

        Arguments:
            filename_nev : Name of the .nev file to be loaded.
            seg : Neo Segment, to which the Event containing the data
                            will be attached.
            lazy : Postpone actual reading of the data. Instead provide a
                            dummy Event. Default 'False'.
            cascade : Not used in this context. Default: 'True'.
            t_start : time (quantity) at which the Events begin.
                            Default None.
            t_stop : time (quantity) at which the Events end.
                            Default None.

        Returns:
            None
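
        Usage (sketch; the file name is a placeholder and NIO an initialized
        NeuralynxIO instance):
            import quantities as pq
            from neo.core import Segment

            seg = Segment(file_origin=NIO.filename)
            NIO.read_nev('Events.nev', seg, t_start=0 * pq.s,
                         t_stop=10 * pq.s)
            for ev in seg.events:
                print(ev.name)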
        '''
        if filename_nev[-4:] != '.nev':
            filename_nev += '.nev'
        if sep in filename_nev:
            filename_nev = filename_nev.split(sep)[-1]

        if filename_nev not in self.nev_asso:
            raise ValueError('NeuralynxIO is attempting to read a file '
                             'not associated to this session (%s).' % (
                                 filename_nev))

        # ensure meaningful values for requested start and stop times
        # providing time in samples for a nev file does not make sense as we
        # don't know the underlying sampling rate
        if isinstance(t_start, int):
            raise ValueError(
                'Requesting event information from nev file in samples does '
                'not make sense. Requested t_start %s' % t_start)
        if isinstance(t_stop, int):
            raise ValueError(
                'Requesting event information from nev file in samples does '
                'not make sense. Requested t_stop %s' % t_stop)

        # ensure meaningful values for requested start and stop times
        if t_start is None or t_start < (
                self.parameters_nev[filename_nev]['t_start']
                - self.parameters_global['t_start']):
            t_start = (self.parameters_nev[filename_nev]['t_start']
                       - self.parameters_global['t_start'])

        if t_start > (self.parameters_nev[filename_nev]['t_stop']
                      - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.' % (
                    t_start, t_stop,
                    (self.parameters_nev[filename_nev]['t_stop']
                     - self.parameters_global['t_start']),
                    filename_nev))

        if t_stop is None or t_stop > (
                self.parameters_nev[filename_nev]['t_stop']
                - self.parameters_global['t_start']):
            t_stop = (self.parameters_nev[filename_nev]['t_stop']
                      - self.parameters_global['t_start'])

        if t_stop < (self.parameters_nev[filename_nev]['t_start']
                     - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.' % (
                    t_start, t_stop,
                    (self.parameters_nev[filename_nev]['t_start']
                     - self.parameters_global['t_start']),
                    filename_nev))

        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than / equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_nev))

        data = self.__mmap_nev_file(filename_nev)
        # Extracting all events for one event type and putting them into an
        # event array
        # TODO: Check if this is the correct way of event creation.
        for event_type in self.parameters_nev[filename_nev]['event_types']:
            # Extract all time stamps of digital markers and rescale time
            type_mask = [i for i in range(len(data)) if
                         (data[i][4] == event_type['event_id']
                          and data[i][5] == event_type['nttl']
                          and data[i][10].decode('latin-1') == event_type[
                              'name'])]
            marker_times = [t[3] for t in
                            data[type_mask]] * self.nev_time_unit - \
                self.parameters_global['t_start']

            # only consider Events in the requested time window [t_start,
            # t_stop]
            time_mask = [i for i in range(len(marker_times)) if (
                marker_times[i] >= t_start and marker_times[i] <= t_stop)]
            marker_times = marker_times[time_mask]

            # Do not create an event array if there are no events of this
            # type in the requested time range
            if len(marker_times) == 0:
                continue

            ev = Event(times=pq.Quantity(marker_times,
                                         units=self.nev_time_unit,
                                         dtype="int"),
                       labels=event_type['name'],
                       name="Digital Marker " + str(event_type),
                       file_origin=filename_nev,
                       marker_id=event_type['event_id'],
                       digital_marker=True,
                       analog_marker=False,
                       nttl=event_type['nttl'])

            seg.events.append(ev)
    def read_nse(self, filename_nse, seg, lazy=False, cascade=True,
                 t_start=None, t_stop=None, unit_list=None,
                 waveforms=False):
        '''
        Reads an nse file and attaches its content as SpikeTrains to the
        provided neo segment. Times can be provided in samples (integer
        values). If the nse file does not contain a sampling rate value, the
        ncs sampling rate on the same electrode is used.

        Arguments:
            filename_nse : Name of the .nse file to be loaded.
            seg : Neo Segment, to which the SpikeTrain containing the data
                            will be attached.
            lazy : Postpone actual reading of the data. Instead provide a
                            dummy SpikeTrain. Default 'False'.
            cascade : Not used in this context. Default: 'True'.
            t_start : time or sample (quantity or integer) at which the
                            SpikeTrain begins.
                            Default None.
            t_stop : time or sample (quantity or integer) at which the
                            SpikeTrain ends.
                            Default None.
            unit_list : unit ids to be loaded. If [] or None, all units are
                            loaded. Default None.
            waveforms : Load the waveform (up to 32 data points) for each
                            spike time. Default: False

        Returns:
            None
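
        Usage (sketch; the file name is a placeholder and NIO an initialized
        NeuralynxIO instance):
            import quantities as pq
            from neo.core import Segment

            seg = Segment(file_origin=NIO.filename)
            NIO.read_nse('SE1.nse', seg, t_start=0 * pq.s, t_stop=10 * pq.s,
                         waveforms=True)
            st = seg.spiketrains[0]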
        '''
        if filename_nse[-4:] != '.nse':
            filename_nse += '.nse'
        if sep in filename_nse:
            filename_nse = filename_nse.split(sep)[-1]

        # extracting channel id of requested file
        channel_id = self.get_channel_id_by_file_name(filename_nse)
        if channel_id is not None:
            chid = channel_id
        else:
            # if nse file is empty it is not listed in self.parameters_nse,
            # but in self.nse_avail
            if filename_nse in self.nse_avail:
                warnings.warn('NeuralynxIO is attempting to read an empty '
                              '(not associated) nse file (%s). '
                              'Not loading nse file.' % (filename_nse))
                return
            else:
                raise ValueError('NeuralynxIO is attempting to read a file '
                                 'not associated to this session (%s).' % (
                                     filename_nse))

        # ensure meaningful values for requested start and stop times
        # in case time is provided in samples: transform to absolute time
        # units
        # the ncs sampling rate is the best guess if there is no explicit
        # sampling rate given for nse values
        if 'sampling_rate' in self.parameters_nse[chid]:
            sr = self.parameters_nse[chid]['sampling_rate']
        elif chid in self.parameters_ncs and 'sampling_rate' in \
                self.parameters_ncs[chid]:
            sr = self.parameters_ncs[chid]['sampling_rate']
        else:
            raise ValueError(
                'No sampling rate present for channel id %i in nse file %s. '
                'Could also not find the sampling rate of the respective '
                'ncs file.' % (chid, filename_nse))

        if isinstance(t_start, int):
            t_start = t_start / sr
        if isinstance(t_stop, int):
            t_stop = t_stop / sr

        # + rescaling to global recording start (first sample in any file
        # type)
        # This is not optimal, as there is no way to know how long the
        # recording lasted after the last spike
        if t_start is None or t_start < (
                self.parameters_nse[chid]['t_first']
                - self.parameters_global['t_start']):
            t_start = (self.parameters_nse[chid]['t_first']
                       - self.parameters_global['t_start'])

        if t_start > (self.parameters_nse[chid]['t_last']
                      - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.' % (
                    t_start, t_stop,
                    (self.parameters_nse[chid]['t_last']
                     - self.parameters_global['t_start']),
                    filename_nse))

        if t_stop is None:
            t_stop = (sys.maxsize) * self.nse_time_unit
        if t_stop is None or t_stop > (
                self.parameters_nse[chid]['t_last']
                - self.parameters_global['t_start']):
            t_stop = (self.parameters_nse[chid]['t_last']
                      - self.parameters_global['t_start'])

        if t_stop < (self.parameters_nse[chid]['t_first']
                     - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.' % (
                    t_start, t_stop,
                    (self.parameters_nse[chid]['t_first']
                     - self.parameters_global['t_start']),
                    filename_nse))

        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than / equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_nse))

        # reading data
        [timestamps, channel_ids, cell_numbers, features,
         data_points] = self.__mmap_nse_packets(filename_nse)

        # load all units available if unit_list == [] or None
        if unit_list == [] or unit_list is None:
            unit_list = np.unique(cell_numbers)
        elif not any([u in cell_numbers for u in unit_list]):
            self._diagnostic_print(
                'None of the requested unit ids (%s) present '
                'in nse file %s (contains unit_list %s)' % (
                    unit_list, filename_nse, np.unique(cell_numbers)))

        # extracting spikes unit-wise and generating spiketrains
        for unit_i in unit_list:
            if not lazy:
                # Extract all time stamps of that neuron on that electrode
                unit_mask = np.where(cell_numbers == unit_i)[0]
                spike_times = timestamps[unit_mask] * self.nse_time_unit
                spike_times = spike_times - self.parameters_global['t_start']
                time_mask = np.where(np.logical_and(spike_times >= t_start,
                                                    spike_times < t_stop))
                spike_times = spike_times[time_mask]
            else:
                spike_times = pq.Quantity([], units=self.nse_time_unit)

            # Create SpikeTrain object
            st = SpikeTrain(times=spike_times,
                            t_start=t_start,
                            t_stop=t_stop,
                            sampling_rate=self.parameters_ncs[chid][
                                'sampling_rate'],
                            name="Channel %i, Unit %i" % (chid, unit_i),
                            file_origin=filename_nse,
                            unit_id=unit_i,
                            channel_id=chid)

            if waveforms and not lazy:
                # Collect all waveforms of the specific unit
                # For computational reasons: no units, no time axis
                st.waveforms = data_points[unit_mask][time_mask]
                # TODO: Add units to waveforms (pq.uV?) and add annotation
                # left_sweep = x * pq.ms indicating when threshold crossing
                # occurred in waveform

            st.annotations.update(self.parameters_nse[chid])
            st.annotations['electrode_id'] = chid
            # This annotation is necessary for automatic generation of
            # recordingchannels
            st.annotations['channel_index'] = chid

            seg.spiketrains.append(st)
    def read_ntt(self, filename_ntt, seg, lazy=False, cascade=True,
                 t_start=None, t_stop=None, unit_list=None,
                 waveforms=False):
        '''
        Reads an ntt file and attaches its content as SpikeTrains to the
        provided neo segment.

        Arguments:
            filename_ntt : Name of the .ntt file to be loaded.
            seg : Neo Segment, to which the SpikeTrain containing the data
                            will be attached.
            lazy : Postpone actual reading of the data. Instead provide a
                            dummy SpikeTrain. Default 'False'.
            cascade : Not used in this context. Default: 'True'.
            t_start : time (quantity) that the SpikeTrain begins.
                            Default None.
            t_stop : time (quantity) that the SpikeTrain ends. Default None.
            unit_list : unit ids to be loaded. If [] or None, all units are
                            loaded. Default None.
            waveforms : Load the waveform (up to 32 data points) for each
                            spike time. Default: False

        Returns:
            None
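
        Usage (sketch; the file name is a placeholder and NIO an initialized
        NeuralynxIO instance):
            import quantities as pq
            from neo.core import Segment

            seg = Segment(file_origin=NIO.filename)
            NIO.read_ntt('TT1.ntt', seg, t_start=0 * pq.s, t_stop=10 * pq.s,
                         waveforms=True)
            st = seg.spiketrains[0]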
        '''
        if filename_ntt[-4:] != '.ntt':
            filename_ntt += '.ntt'
        if sep in filename_ntt:
            filename_ntt = filename_ntt.split(sep)[-1]

        # extracting channel id of requested file
        channel_id = self.get_channel_id_by_file_name(filename_ntt)
        if channel_id is not None:
            chid = channel_id
        else:
            # if ntt file is empty it is not listed in self.parameters_ntt,
            # but in self.ntt_avail
            if filename_ntt in self.ntt_avail:
                warnings.warn('NeuralynxIO is attempting to read an empty '
                              '(not associated) ntt file (%s). '
                              'Not loading ntt file.' % (filename_ntt))
                return
            else:
                raise ValueError('NeuralynxIO is attempting to read a file '
                                 'not associated to this session (%s).' % (
                                     filename_ntt))

        # ensure meaningful values for requested start and stop times
        # in case time is provided in samples: transform to absolute time
        # units
        # the ncs sampling rate is the best guess if there is no explicit
        # sampling rate given for ntt values
        if 'sampling_rate' in self.parameters_ntt[chid]:
            sr = self.parameters_ntt[chid]['sampling_rate']
        elif chid in self.parameters_ncs and 'sampling_rate' in \
                self.parameters_ncs[chid]:
            sr = self.parameters_ncs[chid]['sampling_rate']
        else:
            raise ValueError(
                'No sampling rate present for channel id %i in ntt file %s. '
                'Could also not find the sampling rate of the respective '
                'ncs file.' % (chid, filename_ntt))

        if isinstance(t_start, int):
            t_start = t_start / sr
        if isinstance(t_stop, int):
            t_stop = t_stop / sr

        # + rescaling to global recording start (first sample in any
        # recording file)
        if t_start is None or t_start < (
                self.parameters_ntt[chid]['t_first']
                - self.parameters_global['t_start']):
            t_start = (self.parameters_ntt[chid]['t_first']
                       - self.parameters_global['t_start'])

        if t_start > (self.parameters_ntt[chid]['t_last']
                      - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.' % (
                    t_start, t_stop,
                    (self.parameters_ntt[chid]['t_last']
                     - self.parameters_global['t_start']),
                    filename_ntt))

        if t_stop is None:
            t_stop = (sys.maxsize) * self.ntt_time_unit
        if t_stop is None or t_stop > (
                self.parameters_ntt[chid]['t_last']
                - self.parameters_global['t_start']):
            t_stop = (self.parameters_ntt[chid]['t_last']
                      - self.parameters_global['t_start'])

        if t_stop < (self.parameters_ntt[chid]['t_first']
                     - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.' % (
                    t_start, t_stop,
                    (self.parameters_ntt[chid]['t_first']
                     - self.parameters_global['t_start']),
                    filename_ntt))

        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than / equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_ntt))

        # reading data
        [timestamps, channel_ids, cell_numbers, features,
         data_points] = self.__mmap_ntt_packets(filename_ntt)

        # TODO: When ntt available: Implement 1 RecordingChannelGroup per
        # Tetrode, such that each electrode gets its own recording channel

        # load all units available if unit_list == [] or None
        if unit_list == [] or unit_list is None:
            unit_list = np.unique(cell_numbers)
        elif not any([u in cell_numbers for u in unit_list]):
            self._diagnostic_print(
                'None of the requested unit ids (%s) present '
                'in ntt file %s (contains units %s)' % (
                    unit_list, filename_ntt, np.unique(cell_numbers)))

        # loading data for each unit and generating spiketrains
        for unit_i in unit_list:
            if not lazy:
                # Extract all time stamps of that neuron on that electrode
                mask = np.where(cell_numbers == unit_i)[0]
                spike_times = timestamps[mask] * self.ntt_time_unit
                spike_times = spike_times - self.parameters_global['t_start']
                spike_times = spike_times[np.where(
                    np.logical_and(spike_times >= t_start,
                                   spike_times < t_stop))]
            else:
                spike_times = pq.Quantity([], units=self.ntt_time_unit)

            # Create SpikeTrain object
            st = SpikeTrain(times=spike_times,
                            t_start=t_start,
                            t_stop=t_stop,
                            sampling_rate=self.parameters_ncs[chid][
                                'sampling_rate'],
                            name="Channel %i, Unit %i" % (chid, unit_i),
                            file_origin=filename_ntt,
                            unit_id=unit_i,
                            channel_id=chid)

            # Collect all waveforms of the specific unit
            if waveforms and not lazy:
                # For computational reasons: no units, no time axis
                # transposing to adhere to the neo guideline, which states
                # that time should be in the first axis.
                # This is counter-intuitive for tetrode waveforms.
                st.waveforms = np.array(
                    [data_points[t, :, :] for t in range(len(timestamps))
                     if cell_numbers[t] == unit_i]).transpose()
                # TODO: Add units to waveforms (pq.uV?) and add annotation
                # left_sweep = x * pq.ms indicating when threshold crossing
                # occurred in waveform

            st.annotations = self.parameters_ntt[chid]
            st.annotations['electrode_id'] = chid
            # This annotation is necessary for automatic generation of
            # recordingchannels
            st.annotations['channel_index'] = chid

            seg.spiketrains.append(st)
    # ######################## private routines ###########################
    def _associate(self, cachedir=None, usecache='hash'):
        """
        Associates the object with a specified Neuralynx session, i.e., a
        combination of .ncs, .nse, .nev and .ntt files. The meta data is read
        into the object for future reference.

        Arguments:
            cachedir : Directory for loading and saving hashes of recording
                        sessions and pickled meta information about files
                        extracted during the association process.
            usecache : method used for cache identification. Possible values:
                        'hash'/'always'/'datesize'/'never'. Default 'hash'.
        Returns:
            -
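
        Usage (sketch; _associate is normally called once from __init__, so
        the cache options are typically passed to the constructor; the cache
        path is a placeholder):
            NIO = NeuralynxIO(session_folder, cachedir='/tmp/nlx_cache',
                              use_cache='datesize')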
  1045. """
        # If already associated, disassociate first
        if self.associated:
            raise IOError(
                "Trying to associate an already associated NeuralynxIO "
                "object.")

        # Create parameter containers
        # Dictionary that holds different parameters read from the .nev file
        self.parameters_nse = {}
        # List of parameter dictionaries for all potential file types
        self.parameters_ncs = {}
        self.parameters_nev = {}
        self.parameters_ntt = {}
        # combined global parameters
        self.parameters_global = {}

        # Scanning session directory for recorded files
        self.sessionfiles = [f for f in listdir(self.sessiondir) if
                             isfile(os.path.join(self.sessiondir, f))]

        # Listing available files
        self.ncs_avail = []
        self.nse_avail = []
        self.nev_avail = []
        self.ntt_avail = []

        # Listing associated (= non-corrupted, non-empty) files
        self.ncs_asso = []
        self.nse_asso = []
        self.nev_asso = []
        self.ntt_asso = []

        if usecache not in ['hash', 'always', 'datesize', 'never']:
            raise ValueError(
                "Argument value of usecache '%s' is not valid. Accepted "
                "values are 'hash','always','datesize','never'" % usecache)

        if cachedir is None and usecache != 'never':
            raise ValueError('No cache directory provided.')

        # check if there are any changes to the data files -> new data check
        # run; files are never checked if usecache == 'always'
        check_files = True if usecache != 'always' else False

        if cachedir is not None and usecache != 'never':

            self._diagnostic_print(
                'Calculating %s of session files to check for cached '
                'parameter files.' % usecache)
            cachefile = cachedir + sep + self.sessiondir.split(sep)[
                -1] + '/hashkeys'
            if not os.path.exists(cachedir + sep + self.sessiondir.split(sep)[
                    -1]):
                os.makedirs(cachedir + sep + self.sessiondir.split(sep)[-1])

            if usecache == 'hash':
                hashes_calc = {}
                # calculates hash of all available files
                for f in self.sessionfiles:
                    file_hash = self.hashfile(open(self.sessiondir + sep + f,
                                                   'rb'), hashlib.sha256())
                    hashes_calc[f] = file_hash
            elif usecache == 'datesize':
                hashes_calc = {}
                for f in self.sessionfiles:
                    hashes_calc[f] = self.datesizefile(
                        self.sessiondir + sep + f)

            # load hashes saved for this session in an earlier loading run
            if os.path.exists(cachefile):
                hashes_read = pickle.load(open(cachefile, 'rb'))
            else:
                hashes_read = {}

            # compare hashes to previously saved meta data and load meta data
            # if no changes occurred
            if usecache == 'always' or all([f in hashes_calc
                                            and f in hashes_read
                                            and hashes_calc[f]
                                            == hashes_read[f]
                                            for f in self.sessionfiles]):
                check_files = False
                self._diagnostic_print(
                    'Using cached metadata from earlier analysis run in file '
                    '%s. Skipping file checks.' % cachefile)

                # loading saved parameters
                parameterfile = cachedir + sep + self.sessiondir.split(sep)[
                    -1] + '/parameters.cache'
                if os.path.exists(parameterfile):
                    parameters_read = pickle.load(open(parameterfile, 'rb'))
                else:
                    raise IOError('Inconsistent cache files.')

                for IOdict, dictname in [(self.parameters_global, 'global'),
                                         (self.parameters_ncs, 'ncs'),
                                         (self.parameters_nse, 'nse'),
                                         (self.parameters_nev, 'nev'),
                                         (self.parameters_ntt, 'ntt')]:
                    IOdict.update(parameters_read[dictname])
                self.nev_asso = self.parameters_nev.keys()
                self.ncs_asso = [val['filename'] for val in
                                 self.parameters_ncs.values()]
                self.nse_asso = [val['filename'] for val in
                                 self.parameters_nse.values()]
                self.ntt_asso = [val['filename'] for val in
                                 self.parameters_ntt.values()]

        for filename in self.sessionfiles:
            # Extracting only continuous signal files (.ncs)
            if filename[-4:] == '.ncs':
                self.ncs_avail.append(filename)
            elif filename[-4:] == '.nse':
                self.nse_avail.append(filename)
            elif filename[-4:] == '.nev':
                self.nev_avail.append(filename)
            elif filename[-4:] == '.ntt':
                self.ntt_avail.append(filename)
            else:
                self._diagnostic_print(
                    'Ignoring file of unknown data type %s' % filename)
        if check_files:
            self._diagnostic_print('Starting individual file checks.')

            # ==============================================================
            # Scan NCS files
            # ==============================================================

            self._diagnostic_print(
                '\nDetected %i .ncs file(s).' % len(self.ncs_avail))

            for ncs_file in self.ncs_avail:
                # Loading individual NCS file and extracting parameters
                self._diagnostic_print("Scanning " + ncs_file + ".")

                # Reading file packet headers
                filehandle = self.__mmap_ncs_packet_headers(ncs_file)
                if filehandle is None:
                    continue

                try:
                    # Checking consistency of ncs file
                    self.__ncs_packet_check(filehandle)
                except AssertionError:
                    warnings.warn(
                        'Session file %s did not pass data packet check. '
                        'This file cannot be loaded.' % ncs_file)
                    continue

                # Read data packet header information and store it in
                # parameters_ncs
                self.__read_ncs_data_headers(filehandle, ncs_file)

                # Reading txt file header
                channel_id = self.get_channel_id_by_file_name(ncs_file)
                self.__read_text_header(ncs_file,
                                        self.parameters_ncs[channel_id])

                # Check for invalid starting times of data packets in ncs file
                self.__ncs_invalid_first_sample_check(filehandle)

                # Check ncs file for gaps
                self.__ncs_gap_check(filehandle)

                self.ncs_asso.append(ncs_file)
            # ==============================================================
            # Scan NSE files
            # ==============================================================

            self._diagnostic_print(
                '\nDetected %i .nse file(s).' % len(self.nse_avail))

            for nse_file in self.nse_avail:
                # Loading individual NSE file and extracting parameters
                self._diagnostic_print('Scanning ' + nse_file + '.')

                # Reading file
                filehandle = self.__mmap_nse_packets(nse_file)
                if filehandle is None:
                    continue

                try:
                    # Checking consistency of nse file
                    self.__nse_check(filehandle)
                except AssertionError:
                    warnings.warn(
                        'Session file %s did not pass data packet check. '
                        'This file cannot be loaded.' % nse_file)
                    continue

                # Read header information and store it in parameters_nse
                self.__read_nse_data_header(filehandle, nse_file)

                # Reading txt file header
                channel_id = self.get_channel_id_by_file_name(nse_file)
                self.__read_text_header(nse_file,
                                        self.parameters_nse[channel_id])

                # use the sampling rate from the txt header, as it is not
                # stored in the data packets
                if 'SamplingFrequency' in self.parameters_nse[channel_id]:
                    self.parameters_nse[channel_id]['sampling_rate'] = \
                        (self.parameters_nse[channel_id]['SamplingFrequency']
                         * self.nse_sr_unit)

                self.nse_asso.append(nse_file)
            # ==============================================================
            # Scan NEV files
            # ==============================================================

            self._diagnostic_print(
                '\nDetected %i .nev file(s).' % len(self.nev_avail))

            for nev_file in self.nev_avail:
                # Loading individual NEV file and extracting parameters
                self._diagnostic_print('Scanning ' + nev_file + '.')

                # Reading file
                filehandle = self.__mmap_nev_file(nev_file)
                if filehandle is None:
                    continue

                try:
                    # Checking consistency of nev file
                    self.__nev_check(filehandle)
                except AssertionError:
                    warnings.warn(
                        'Session file %s did not pass data packet check. '
                        'This file cannot be loaded.' % nev_file)
                    continue

                # Read header information and store it in parameters_nev
                self.__read_nev_data_header(filehandle, nev_file)

                # Reading txt file header
                self.__read_text_header(nev_file,
                                        self.parameters_nev[nev_file])

                self.nev_asso.append(nev_file)
            # ==============================================================
            # Scan NTT files
            # ==============================================================

            self._diagnostic_print(
                '\nDetected %i .ntt file(s).' % len(self.ntt_avail))

            for ntt_file in self.ntt_avail:
                # Loading individual NTT file and extracting parameters
                self._diagnostic_print('Scanning ' + ntt_file + '.')

                # Reading file
                filehandle = self.__mmap_ntt_file(ntt_file)
                if filehandle is None:
                    continue

                try:
                    # Checking consistency of ntt file
                    self.__ntt_check(filehandle)
                except AssertionError:
                    warnings.warn(
                        'Session file %s did not pass data packet check. '
                        'This file cannot be loaded.' % ntt_file)
                    continue

                # Read header information and store it in parameters_ntt
                self.__read_ntt_data_header(filehandle, ntt_file)

                # Reading txt file header
                channel_id = self.get_channel_id_by_file_name(ntt_file)
                self.__read_ntt_text_header(ntt_file)

                # use the sampling rate from the txt header, as it is not
                # stored in the data packets
                if 'SamplingFrequency' in self.parameters_ntt[channel_id]:
                    self.parameters_ntt[channel_id]['sampling_rate'] = \
                        (self.parameters_ntt[channel_id]['SamplingFrequency']
                         * self.ntt_sr_unit)

                self.ntt_asso.append(ntt_file)
            # ==============================================================
            # Check consistency across files
            # ==============================================================

            # check RECORDING_OPENED / CLOSED times (from txt header) for
            # the different file types
            for parameter_collection in [self.parameters_ncs,
                                         self.parameters_nse,
                                         self.parameters_nev,
                                         self.parameters_ntt]:
                # check recording_opened times for this file type
                if any(np.abs(np.diff([i['recording_opened'] for i in
                                       parameter_collection.values()]))
                       > datetime.timedelta(seconds=1)):
                    raise ValueError(
                        'Files were opened for recording with a delay '
                        'greater than 1 second.')

                # check recording_closed times for this file type
                if any(np.diff([i['recording_closed'] for i in
                                parameter_collection.values()
                                if i['recording_closed'] is not None])
                       > datetime.timedelta(seconds=0.1)):
                    raise ValueError(
                        'Files were closed after recording with a delay '
                        'greater than 0.1 second.')

            # get the global recording start and stop times across all files
            parameter_collection = (list(self.parameters_ncs.values())
                                    + list(self.parameters_nse.values())
                                    + list(self.parameters_ntt.values())
                                    + list(self.parameters_nev.values()))
            self.parameters_global['recording_opened'] = min(
                [i['recording_opened'] for i in parameter_collection])
            self.parameters_global['recording_closed'] = max(
                [i['recording_closed'] for i in parameter_collection])
            # ==============================================================
            # Set up the global timing scheme
            # ==============================================================

            for file_type, parameter_collection in [
                    ('ncs', self.parameters_ncs),
                    ('nse', self.parameters_nse),
                    ('nev', self.parameters_nev),
                    ('ntt', self.parameters_ntt)]:

                # nse and ntt files store only spike times, so they provide
                # first/last spike times instead of start/stop times
                name_t1, name_t2 = ('t_start', 't_stop') \
                    if file_type not in ['nse', 'ntt'] \
                    else ('t_first', 't_last')

                # check whether all files of the same type start at the same
                # time point
                if file_type not in ['nse', 'ntt'] \
                        and len(np.unique(np.array(
                            [i[name_t1].magnitude for i in
                             parameter_collection.values()]))) > 1:
                    raise ValueError(
                        '%s files do not start at the same time point.'
                        % file_type)

                # save t_start and t_stop for each file type available
                if len([i[name_t1] for i in parameter_collection.values()]):
                    self.parameters_global['%s_t_start' % file_type] = min(
                        [i[name_t1]
                         for i in parameter_collection.values()])
                    self.parameters_global['%s_t_stop' % file_type] = max(
                        [i[name_t2]
                         for i in parameter_collection.values()])

            # extract the minimal t_start and maximal t_stop value for this
            # recording session
            self.parameters_global['t_start'] = min(
                [self.parameters_global['%s_t_start' % t]
                 for t in ['ncs', 'nev', 'nse', 'ntt']
                 if '%s_t_start' % t in self.parameters_global])
            self.parameters_global['t_stop'] = max(
                [self.parameters_global['%s_t_stop' % t]
                 for t in ['ncs', 'nev', 'nse', 'ntt']
                 if '%s_t_stop' % t in self.parameters_global])
            # checking gap consistency across ncs files
            # check number of gaps detected
            if len(np.unique([len(i['gaps']) for i in
                              self.parameters_ncs.values()])) != 1:
                raise ValueError(
                    'NCS files contain different numbers of gaps!')

            # check consistency of gaps across files and create a global gap
            # collection
            self.parameters_global['gaps'] = []
            for g in range(len(list(
                    self.parameters_ncs.values())[0]['gaps'])):
                integrated = False
                gap_stats = np.unique(
                    [i['gaps'][g] for i in self.parameters_ncs.values()],
                    return_counts=True)
                if len(gap_stats[0]) != 3 or len(np.unique(gap_stats[1])) != 1:
                    raise ValueError(
                        'Gap number %i is not consistent across NCS '
                        'files.' % g)
                else:
                    # check whether this is the second part of an already
                    # existing gap
                    for gg in range(len(self.parameters_global['gaps'])):
                        globalgap = self.parameters_global['gaps'][gg]
                        # if the stop time of the first gap is the start time
                        # of the second, the two form one continuous gap
                        if globalgap[2] == \
                                list(self.parameters_ncs.values())[0][
                                    'gaps'][g][1]:
                            self.parameters_global['gaps'][gg] = \
                                self.parameters_global['gaps'][gg][:2] + (
                                    list(self.parameters_ncs.values())[0][
                                        'gaps'][g][2],)
                            integrated = True
                            break

                    if not integrated:
                        # add as a new gap if this is not a continuation of
                        # an existing global gap
                        self.parameters_global['gaps'].append(
                            list(self.parameters_ncs.values())[0]['gaps'][g])
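            # Worked example of the merge above: if the per-channel gap list
            # contains (5, 1000, 2000) and (6, 2000, 2500) (packet id, gap
            # start, gap stop in microseconds), the second entry starts where
            # the first one stops, so the global collection keeps the single
            # merged gap (5, 1000, 2500).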
            # save the association results for future analysis runs together
            # with hash values for change tracking
            if cachedir is not None and usecache != 'never':
                pickle.dump({'global': self.parameters_global,
                             'ncs': self.parameters_ncs,
                             'nev': self.parameters_nev,
                             'nse': self.parameters_nse,
                             'ntt': self.parameters_ntt},
                            open(cachedir + sep
                                 + self.sessiondir.split(sep)[-1]
                                 + '/parameters.cache', 'wb'))
                if usecache != 'always':
                    pickle.dump(hashes_calc, open(
                        cachedir + sep + self.sessiondir.split(sep)[-1]
                        + '/hashkeys', 'wb'))

        self.associated = True

    # #################### private routines ##################################

    # #################### Memory Mapping Methods ############################
    def __mmap_nse_packets(self, filename):
        """
        Memory map of the Neuralynx .nse file optimized for extraction of
        data packet headers.
        Reading a standard dtype improves speed, but timestamps need to be
        reconstructed.
        """
        filesize = getsize(self.sessiondir + sep + filename)  # in bytes
        if filesize > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype='<u2',
                             shape=((filesize - 16384) // 2 // 56, 56),
                             mode='r', offset=16384)

            # reconstructing original data
            # first 4 uint16 words -> timestamp in microseconds
            timestamps = (data[:, 0]
                          + data[:, 1] * 2 ** 16
                          + data[:, 2] * 2 ** 32
                          + data[:, 3] * 2 ** 48)
            channel_id = data[:, 4] + data[:, 5] * 2 ** 16
            cell_number = data[:, 6] + data[:, 7] * 2 ** 16
            features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
                        range(8, 23, 2)]
            features = np.array(features, dtype='i4')

            data_points = data[:, 24:56].astype('i2')
            del data
            return timestamps, channel_id, cell_number, features, data_points
        else:
            return None
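    # Arithmetic behind the reconstruction above: each .nse record is 112
    # bytes = 56 little-endian uint16 words (8 B timestamp, 4 B channel id,
    # 4 B cell number, 8 x 4 B features, 32 x 2 B samples). E.g. timestamp
    # words (1, 0, 0, 0) give 1 microsecond and (0, 1, 0, 0) give
    # 2 ** 16 = 65536 microseconds.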
    def __mmap_ncs_data(self, filename):
        """ Memory map of the Neuralynx .ncs file optimized for data
        extraction. """
        if getsize(self.sessiondir + sep + filename) > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype=np.dtype(('i2', (522,))), mode='r',
                             offset=16384)
            # strip the 20-byte data packet headers (10 int16 words) and keep
            # only the 512 samples of each packet
            return data[:, 10:]
        else:
            return None
    def __mmap_ncs_packet_headers(self, filename):
        """
        Memory map of the Neuralynx .ncs file optimized for extraction of
        data packet headers.
        Reading a standard dtype improves speed, but timestamps need to be
        reconstructed.
        """
        filesize = getsize(self.sessiondir + sep + filename)  # in bytes
        if filesize > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype='<u4',
                             shape=((filesize - 16384) // 4 // 261, 261),
                             mode='r', offset=16384)

            ts = data[:, 0:2]
            multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
                              axis=0)
            timestamps = np.sum(ts * multi, axis=1)
            # timestamps = data[:, 0] + (data[:, 1] * 2 ** 32)
            header_u4 = data[:, 2:5]
            return timestamps, header_u4
        else:
            return None
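    # Record arithmetic for the two .ncs mappings above: one data packet is
    # 1044 bytes (8 B timestamp + 3 x 4 B header fields (channel id, sampling
    # rate, number of valid samples) + 512 x 2 B samples), i.e. 261 uint32
    # words here and 522 int16 words in __mmap_ncs_data, which is why the
    # shapes divide by 261 and 522 respectively.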
    def __mmap_ncs_packet_timestamps(self, filename):
        """
        Memory map of the Neuralynx .ncs file optimized for extraction of
        data packet timestamps.
        Reading a standard dtype improves speed, but timestamps need to be
        reconstructed.
        """
        filesize = getsize(self.sessiondir + sep + filename)  # in bytes
        if filesize > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype='<u4',
                             shape=((filesize - 16384) // 4 // 261, 261),
                             mode='r', offset=16384)

            ts = data[:, 0:2]
            multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
                              axis=0)
            timestamps = np.sum(ts * multi, axis=1)
            # timestamps = data[:, 0] + data[:, 1] * 2 ** 32
            return timestamps
        else:
            return None
    def __mmap_nev_file(self, filename):
        """ Memory map the Neuralynx .nev file. """
        nev_dtype = np.dtype([
            ('reserved', '<i2'),
            ('system_id', '<i2'),
            ('data_size', '<i2'),
            ('timestamp', '<u8'),
            ('event_id', '<i2'),
            ('ttl_input', '<i2'),
            ('crc_check', '<i2'),
            ('dummy1', '<i2'),
            ('dummy2', '<i2'),
            ('extra', '<i4', (8,)),
            ('event_string', 'a128'),
        ])
        if getsize(self.sessiondir + sep + filename) > 16384:
            return np.memmap(self.sessiondir + sep + filename,
                             dtype=nev_dtype, mode='r', offset=16384)
        else:
            return None
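    # The nev_dtype above describes a 184-byte event record: 3 x 2 B header
    # words, an 8 B timestamp, 5 x 2 B event fields, 8 x 4 B extra values and
    # a fixed 128 B event string. Fields are accessed positionally below,
    # e.g. event[3] is the timestamp, event[4] the event id and event[10] the
    # event string.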
    def __mmap_ntt_file(self, filename):
        """ Memory map the Neuralynx .ntt file. """
        ntt_dtype = np.dtype([
            ('timestamp', '<u8'),
            ('sc_number', '<u4'),
            ('cell_number', '<u4'),
            ('params', '<u4', (8,)),
            ('data', '<i2', (32, 4)),
        ])
        if getsize(self.sessiondir + sep + filename) > 16384:
            return np.memmap(self.sessiondir + sep + filename,
                             dtype=ntt_dtype, mode='r', offset=16384)
        else:
            return None
    def __mmap_ntt_packets(self, filename):
        """
        Memory map of the Neuralynx .ntt file optimized for extraction of
        data packet headers.
        Reading a standard dtype improves speed, but timestamps need to be
        reconstructed.
        """
        filesize = getsize(self.sessiondir + sep + filename)  # in bytes
        if filesize > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype='<u2',
                             shape=((filesize - 16384) // 2 // 152, 152),
                             mode='r', offset=16384)

            # reconstructing original data
            # first 4 uint16 words -> timestamp in microseconds
            timestamps = (data[:, 0] + data[:, 1] * 2 ** 16
                          + data[:, 2] * 2 ** 32 + data[:, 3] * 2 ** 48)
            channel_id = data[:, 4] + data[:, 5] * 2 ** 16
            cell_number = data[:, 6] + data[:, 7] * 2 ** 16
            features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
                        range(8, 23, 2)]
            features = np.array(features, dtype='i4')

            # 128 samples per record: 32 samples x 4 electrodes
            data_points = data[:, 24:152].astype('i2').reshape(
                (len(data), 32, 4))
            del data
            return timestamps, channel_id, cell_number, features, data_points
        else:
            return None
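    # Record arithmetic for the .ntt mapping above: one record is 304 bytes
    # (8 B timestamp + 4 B channel id + 4 B cell number + 8 x 4 B features +
    # 32 x 4 x 2 B samples) = 152 uint16 words, matching the shape divisor
    # and the (32, 4) 'data' field of ntt_dtype.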
    # ___________________________ header extraction __________________________

    def __read_text_header(self, filename, parameter_dict):
        # Reading the main file header (plain text, 16 kB)
        with codecs.open(self.sessiondir + sep + filename, 'r',
                         'latin-1') as f:
            text_header = f.read(16384)

        # the necessary text encoding depends on the Python version
        if sys.version_info.major < 3:
            text_header = text_header.encode('latin-1')

        parameter_dict['cheetah_version'] = \
            self.__get_cheetah_version_from_txt_header(text_header, filename)

        parameter_dict.update(self.__get_filename_and_times_from_txt_header(
            text_header, parameter_dict['cheetah_version']))

        # separate the lines of the header and ignore the last line (fill);
        # the line terminator to split on depends on the operating system
        if sep == '/':
            text_header = text_header.split('\r\n')[:-1]
        if sep == '\\':
            text_header = text_header.split('\n')[:-1]
        # minor parameters possibly saved in the header (for any file type)
        minor_keys = ['AcqEntName',
                      'FileType',
                      'FileVersion',
                      'RecordSize',
                      'HardwareSubSystemName',
                      'HardwareSubSystemType',
                      'SamplingFrequency',
                      'ADMaxValue',
                      'ADBitVolts',
                      'NumADChannels',
                      'ADChannel',
                      'InputRange',
                      'InputInverted',
                      'DSPLowCutFilterEnabled',
                      'DspLowCutFrequency',
                      'DspLowCutNumTaps',
                      'DspLowCutFilterType',
                      'DSPHighCutFilterEnabled',
                      'DspHighCutFrequency',
                      'DspHighCutNumTaps',
                      'DspHighCutFilterType',
                      'DspDelayCompensation',
                      'DspFilterDelay_\xb5s',
                      'DisabledSubChannels',
                      'WaveformLength',
                      'AlignmentPt',
                      'ThreshVal',
                      'MinRetriggerSamples',
                      'SpikeRetriggerTime',
                      'DualThresholding',
                      'Feature Peak 0',
                      'Feature Valley 1',
                      'Feature Energy 2',
                      'Feature Height 3',
                      'Feature NthSample 4',
                      'Feature NthSample 5',
                      'Feature NthSample 6',
                      'Feature NthSample 7',
                      'SessionUUID',
                      'FileUUID',
                      'CheetahRev',
                      'ProbeName',
                      'OriginalFileName',
                      'TimeCreated',
                      'TimeClosed',
                      'ApplicationName',
                      'AcquisitionSystem',
                      'ReferenceChannel']
        # extract the values of the minor keys from the header (only taking
        # non-empty lines into account)
        for minor_entry in text_header:
            if minor_entry == '' or minor_entry[0] == '#':
                continue
            matching_key = [key for key in minor_keys if
                            minor_entry.strip('-').startswith(key)]
            if len(matching_key) == 1:
                matching_key = matching_key[0]
                minor_value = minor_entry.split(matching_key)[1].strip()

                # determine the data type of the entry
                if minor_value.isdigit():
                    # converting to int if possible
                    minor_value = int(minor_value)
                else:
                    # converting to float if possible
                    try:
                        minor_value = float(minor_value)
                    except ValueError:
                        pass

                if matching_key in parameter_dict:
                    warnings.warn(
                        'Multiple entries for %s in text header of %s' % (
                            matching_key, filename))
                else:
                    parameter_dict[matching_key] = minor_value
            elif len(matching_key) > 1:
                raise ValueError(
                    'Inconsistent minor key list for text header '
                    'interpretation.')
            else:
                warnings.warn(
                    'Skipping text header entry %s, because it is not in '
                    'the minor key list.' % minor_entry)

        self._diagnostic_print(
            'Successfully decoded text header of file %s.' % filename)
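    # Example of the minor-key parsing above: a header line such as
    # '-SamplingFrequency 32000' matches the key 'SamplingFrequency' and is
    # stored as parameter_dict['SamplingFrequency'] = 32000 (an int), while
    # '-ADBitVolts 0.000000061' would be stored as a float.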
    def __get_cheetah_version_from_txt_header(self, text_header, filename):
        version_regex = re.compile(r'((-CheetahRev )|'
                                   r'(ApplicationName Cheetah "))'
                                   r'(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})')
        match = version_regex.search(text_header)
        if match:
            return match.groupdict()['version']
        else:
            raise ValueError('Cannot extract Cheetah version from file '
                             'header of file %s' % filename)
    def __get_filename_and_times_from_txt_header(self, text_header, version):
        if parse_version(version) <= parse_version('5.6.4'):
            datetime1_regex = re.compile(r'## Time Opened \(m/d/y\): '
                                         r'(?P<date>\S+)'
                                         r' \(h:m:s\.ms\) '
                                         r'(?P<time>\S+)')
            datetime2_regex = re.compile(r'## Time Closed \(m/d/y\): '
                                         r'(?P<date>\S+)'
                                         r' \(h:m:s\.ms\) '
                                         r'(?P<time>\S+)')
            filename_regex = re.compile(r'## File Name (?P<filename>\S+)')
            datetimeformat = '%m/%d/%Y %H:%M:%S.%f'
        else:
            datetime1_regex = re.compile(r'-TimeCreated '
                                         r'(?P<date>\S+) '
                                         r'(?P<time>\S+)')
            datetime2_regex = re.compile(r'-TimeClosed '
                                         r'(?P<date>\S+) '
                                         r'(?P<time>\S+)')
            filename_regex = re.compile(r'-OriginalFileName '
                                        r'"?(?P<filename>\S+)"?')
            datetimeformat = '%Y/%m/%d %H:%M:%S'

        matchtime1 = datetime1_regex.search(text_header).groupdict()
        matchtime2 = datetime2_regex.search(text_header).groupdict()
        matchfilename = filename_regex.search(text_header)
        filename = matchfilename.groupdict()['filename']

        if '## Time Closed File was not closed properly' in text_header:
            warnings.warn('Text header of file %s does not contain a '
                          'recording closed time. File was not closed '
                          'properly.' % filename)

        datetime1 = datetime.datetime.strptime(matchtime1['date'] + ' '
                                               + matchtime1['time'],
                                               datetimeformat)
        datetime2 = datetime.datetime.strptime(matchtime2['date'] + ' '
                                               + matchtime2['time'],
                                               datetimeformat)

        output = {'recording_opened': datetime1,
                  'recording_closed': datetime2,
                  'file_created': datetime1,
                  'file_closed': datetime2,
                  'recording_file_name': filename}
        return output
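    # Illustrative header lines matched above (format derived from the
    # regexes and datetime formats, not from a specific file): Cheetah
    # versions up to 5.6.4 write
    # '## Time Opened (m/d/y): 11/10/2016 (h:m:s.ms) 9:6:7.360',
    # while newer versions write '-TimeCreated 2016/11/10 09:06:07'.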
    def __read_ncs_data_headers(self, filehandle, filename):
        '''
        Reads the .ncs data block headers and stores the information in the
        object's parameters_ncs dictionary.

        Args:
            filehandle (file object):
                Handle to the already opened .ncs file.
            filename (string):
                Name of the ncs file.

        Returns:
            dict of extracted data
        '''
        timestamps = filehandle[0]
        header_u4 = filehandle[1]

        channel_id = header_u4[0][0]
        sr = header_u4[0][1]  # in Hz

        t_start = timestamps[0]  # in microseconds
        # calculate the timestamp of the first sample that was no longer
        # recorded:
        # t_stop = time of first sample in last packet
        #          + (#samples per packet * conversion factor / sampling rate)
        # the conversion factor is needed as times are recorded in
        # microseconds
        t_stop = timestamps[-1] + (
            (header_u4[-1][2]) * (
                1 / self.ncs_time_unit.rescale(pq.s)).magnitude /
            header_u4[-1][1])

        if channel_id in self.parameters_ncs:
            raise ValueError(
                'Detected multiple ncs files for channel_id %i.'
                % channel_id)
        else:
            sampling_unit = [pq.CompoundUnit(
                '%f*%s' % (sr, self.ncs_sr_unit.symbol))]
            sampling_rate = sr * self.ncs_sr_unit
            self.parameters_ncs[channel_id] = {'filename': filename,
                                               't_start': t_start
                                               * self.ncs_time_unit,
                                               't_stop': t_stop
                                               * self.ncs_time_unit,
                                               'sampling_rate': sampling_rate,
                                               'sampling_unit': sampling_unit,
                                               'gaps': []}
            return {channel_id: self.parameters_ncs[channel_id]}
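    # Worked example of the t_stop arithmetic above: with ncs_time_unit in
    # microseconds, the conversion factor is 1e6, so a last packet starting
    # at timestamp 1000000 us with 512 valid samples at 32000 Hz yields
    # t_stop = 1000000 + 512 * 1e6 / 32000 = 1016000 us.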
    def __read_nse_data_header(self, filehandle, filename):
        '''
        Reads the .nse data block headers and stores the information in the
        object's parameters_nse dictionary.

        Args:
            filehandle (file object):
                Handle to the already opened .nse file.
            filename (string):
                Name of the nse file.

        Returns:
            -
        '''
        if filehandle is not None:
            [timestamps, channel_ids, cell_numbers, features,
             data_points] = filehandle

            t_first = timestamps[0]  # in microseconds
            t_last = timestamps[-1]  # in microseconds
            channel_id = channel_ids[0]
            cell_count = cell_numbers[0]  # number of cells identified

            self.parameters_nse[channel_id] = {'filename': filename,
                                               't_first': t_first
                                               * self.nse_time_unit,
                                               't_last': t_last
                                               * self.nse_time_unit,
                                               'cell_count': cell_count}
    def __read_ntt_data_header(self, filehandle, filename):
        '''
        Reads the .ntt data block headers and stores the information in the
        object's parameters_ntt dictionary.

        Args:
            filehandle (file object):
                Handle to the already opened .ntt file.
            filename (string):
                Name of the ntt file.

        Returns:
            -
        '''
        if filehandle is not None:
            [timestamps, channel_ids, cell_numbers, features,
             data_points] = filehandle

            t_first = timestamps[0]  # in microseconds
            t_last = timestamps[-1]  # in microseconds
            channel_id = channel_ids[0]
            cell_count = cell_numbers[0]  # number of cells identified

            self.parameters_ntt[channel_id] = {'filename': filename,
                                               't_first': t_first
                                               * self.ntt_time_unit,
                                               't_last': t_last
                                               * self.ntt_time_unit,
                                               'cell_count': cell_count}
    def __read_nev_data_header(self, filehandle, filename):
        '''
        Reads the .nev data block headers and stores the relevant information
        in the object's parameters_nev dictionary.

        Args:
            filehandle (file object):
                Handle to the already opened .nev file.
            filename (string):
                Name of the nev file.

        Returns:
            -
        '''
        # Extracting basic recording events to be able to check the recording
        # consistency
        if filename in self.parameters_nev:
            raise ValueError(
                'Detected multiple nev files of name %s.' % filename)
        else:
            self.parameters_nev[filename] = {}
            if 'Starting_Recording' in self.parameters_nev[filename]:
                raise ValueError('Trying to read a second nev file of name '
                                 '%s. Only one can be handled.' % filename)
            self.parameters_nev[filename]['Starting_Recording'] = []
            self.parameters_nev[filename]['events'] = []

            for event in filehandle:
                # separately extracting 'Starting Recording'
                if ((event[4] in [11, 19])
                        and (event[10].decode('latin-1')
                             == 'Starting Recording')):
                    self.parameters_nev[filename][
                        'Starting_Recording'].append(
                        event[3] * self.nev_time_unit)
                # adding all events to the parameter collection
                self.parameters_nev[filename]['events'].append(
                    {'timestamp': event[3] * self.nev_time_unit,
                     'event_id': event[4],
                     'nttl': event[5],
                     'name': event[10].decode('latin-1')})

            if len(self.parameters_nev[filename]['Starting_Recording']) < 1:
                raise ValueError(
                    'No event "Starting_Recording" detected in %s' % filename)

            self.parameters_nev[filename]['t_start'] = min(
                self.parameters_nev[filename]['Starting_Recording'])
            # t_stop = timestamp of the last event in the file
            self.parameters_nev[filename]['t_stop'] = max(
                [e['timestamp'] for e in
                 self.parameters_nev[filename]['events']])

            # extract all occurring event types (= combinations of nttl,
            # event_id and name/string)
            event_types = copy.deepcopy(
                self.parameters_nev[filename]['events'])
            for d in event_types:
                d.pop('timestamp')
            self.parameters_nev[filename]['event_types'] = [
                dict(y) for y in set(tuple(x.items()) for x in event_types)]
    # ________________________ File Checks ___________________________________

    def __ncs_packet_check(self, filehandle):
        '''
        Checks the consistency of data in an ncs file and raises an
        AssertionError if a check fails. Detected recording gaps are added to
        parameters_ncs.

        Args:
            filehandle (file object):
                Handle to the already opened .ncs file.
        '''
        timestamps = filehandle[0]
        header_u4 = filehandle[1]

        # checking the sampling rate of the data packets
        sr0 = header_u4[0, 1]
        assert all(header_u4[:, 1] == sr0)

        # checking the channel id of the data packets
        channel_id = header_u4[0, 0]
        assert all(header_u4[:, 0] == channel_id)

        # time offset of the data packets
        # TODO: Check if there is a safer way to do the delta_t check for ncs
        # data packets; it is not a safe assumption that the first two data
        # packets have correct time stamps
        delta_t = timestamps[1] - timestamps[0]

        # valid samples of the first data packet
        temp_valid_samples = header_u4[0, 2]

        # the time difference between packets must correspond to the number
        # of recorded samples
        assert delta_t == (
            temp_valid_samples / (
                self.ncs_time_unit.rescale(pq.s).magnitude * sr0))

        self._diagnostic_print('NCS packet check successful.')
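    # Example of the delta_t check above: with 512 valid samples in the first
    # packet and a 32000 Hz sampling rate, consecutive packet timestamps must
    # be 512 / (1e-6 * 32000) = 16000 microseconds apart.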
    def __nse_check(self, filehandle):
        '''
        Checks the consistency of data in an nse file and raises an
        AssertionError if a check fails.

        Args:
            filehandle (file object):
                Handle to the already opened .nse file.
        '''
        [timestamps, channel_ids, cell_numbers, features,
         data_points] = filehandle

        assert all(channel_ids == channel_ids[0])
        assert all([len(dp) == len(data_points[0]) for dp in data_points])

        self._diagnostic_print('NSE file check successful.')
    def __nev_check(self, filehandle):
        '''
        Checks the consistency of data in a nev file and raises an
        AssertionError if a check fails.

        Args:
            filehandle (file object):
                Handle to the already opened .nev file.
        '''
        # the data_size entry should always equal 2 according to the
        # Neuralynx file description, but 0 has also been observed in
        # practice
        assert all([f[2] in [0, 2] for f in filehandle])

        # TODO: check with more nev files whether indices 0, 1, 6, 7, 8 and 9
        # can be non-zero. Interpretation? Include in event extraction.
        # Only 0 has been observed for these indices in nev files so far;
        # if they can be non-zero, this needs to be included in the event
        # extraction.
        assert all([f[0] == 0 for f in filehandle])
        assert all([f[1] == 0 for f in filehandle])
        assert all([f[6] == 0 for f in filehandle])
        assert all([f[7] == 0 for f in filehandle])
        assert all([f[8] == 0 for f in filehandle])
        assert all([all(f[9] == 0) for f in filehandle])

        self._diagnostic_print('NEV file check successful.')
    def __ntt_check(self, filehandle):
        '''
        Checks the consistency of data in an ntt file and raises an
        AssertionError if a check fails.

        Args:
            filehandle (file object):
                Handle to the already opened .ntt file.
        '''
        # TODO: check this when more .ntt files are available
        [timestamps, channel_ids, cell_numbers, features,
         data_points] = filehandle

        assert all(channel_ids == channel_ids[0])
        assert all([len(dp) == len(data_points[0]) for dp in data_points])

        self._diagnostic_print('NTT file check successful.')
    def __ncs_gap_check(self, filehandle):
        '''
        Checks individual data blocks of ncs files for consistent starting
        times with respect to the sample count.
        This covers intended recording gaps as well as shortened data
        packets, which are incomplete.
        '''
        timestamps = filehandle[0]
        header_u4 = filehandle[1]
        channel_id = header_u4[0, 0]
        if channel_id not in self.parameters_ncs:
            self.parameters_ncs[channel_id] = {}

        # time stamps of the data packets
        delta_t = timestamps[1] - timestamps[0]  # in microseconds
        data_packet_offsets = np.diff(timestamps)  # in microseconds

        # check if delta_t corresponds to the number of valid samples present
        # in the data packets
        # NOTE: This also detects recording gaps!
        valid_samples = header_u4[:-1, 2]
        sampling_rate = header_u4[0, 1]
        packet_checks = (valid_samples / (self.ncs_time_unit.rescale(
            pq.s).magnitude * sampling_rate)) == data_packet_offsets
        if not all(packet_checks):
            if 'broken_packets' not in self.parameters_ncs[channel_id]:
                self.parameters_ncs[channel_id]['broken_packets'] = []
            broken_packets = np.where(~packet_checks)[0]
            for broken_packet in broken_packets:
                self.parameters_ncs[channel_id]['broken_packets'].append(
                    (broken_packet,
                     valid_samples[broken_packet],
                     data_packet_offsets[broken_packet]))
                self._diagnostic_print('Detected broken packet in NCS file '
                                       'at packet id %i (%i valid samples, '
                                       'time offset %i microseconds)'
                                       % (broken_packet,
                                          valid_samples[broken_packet],
                                          data_packet_offsets[broken_packet]))

        # checking for irregular data packet durations -> gaps / shortened
        # data packets
        if not all(data_packet_offsets == delta_t):
            if 'gaps' not in self.parameters_ncs[channel_id]:
                self.parameters_ncs[channel_id]['gaps'] = []
            # a gap is identified by (packet id, gap start, gap stop)
            gap_packet_ids = np.where(data_packet_offsets != delta_t)[0]
            for gap_packet_id in gap_packet_ids:
                # skip if this packet starting time is known to be corrupted,
                # hoping that no corruption and gap occur simultaneously;
                # a corrupted time stamp affects two delta_t comparisons:
                if gap_packet_id in self.parameters_ncs[channel_id][
                        'invalid_first_samples'] \
                        or gap_packet_id + 1 in self.parameters_ncs[
                            channel_id]['invalid_first_samples']:
                    continue

                # start time of the last packet before the gap [microsec]
                gap_start = timestamps[gap_packet_id]
                # start time of the first packet after the gap [microsec]
                gap_stop = timestamps[gap_packet_id + 1]
                self.parameters_ncs[channel_id]['gaps'].append(
                    (gap_packet_id, gap_start, gap_stop))
                self._diagnostic_print('Detected gap in NCS file between '
                                       'sample time %i and %i (last correct '
                                       'packet id %i)' % (gap_start, gap_stop,
                                                          gap_packet_id))
    def __ncs_invalid_first_sample_check(self, filehandle):
        '''
        Checks data blocks of ncs files for corrupted starting times
        indicating a missing first sample in the data packet. These are
        excluded from the gap check, but ignored for further analysis.
        '''
        timestamps = filehandle[0]
        header_u4 = filehandle[1]
        channel_id = header_u4[0, 0]
        self.parameters_ncs[channel_id]['invalid_first_samples'] = []

        # checking if the high bits of the timestamp are set, which indicates
        # a corrupted value
        invalid_packet_ids = np.where(timestamps >= 2 ** 55)[0]
        if len(invalid_packet_ids) > 0:
            warnings.warn('Invalid first sample(s) detected in ncs file '
                          '(packet id(s) %s)! This error is ignored in '
                          'subsequent routines.' % invalid_packet_ids)
            self.parameters_ncs[channel_id][
                'invalid_first_samples'] = invalid_packet_ids

            # checking the consistency of the data around the corrupted
            # packet time
            for invalid_packet_id in invalid_packet_ids:
                if invalid_packet_id < 2 or invalid_packet_id > len(
                        filehandle) - 2:
                    raise ValueError(
                        'Corrupted ncs data packet at the beginning '
                        'or end of the file.')
                elif (timestamps[invalid_packet_id + 1]
                        - timestamps[invalid_packet_id - 1]
                        != 2 * (timestamps[invalid_packet_id - 1]
                                - timestamps[invalid_packet_id - 2])):
                    raise ValueError('Starting times of ncs data packets '
                                     'around the corrupted data packet are '
                                     'not consistent!')
    # ############ Supplementary Functions ###################################

    def get_channel_id_by_file_name(self, filename):
        """
        Checks the parameters of NCS, NSE and NTT files for the given
        filename and returns the channel_id if the result is consistent.

        :param filename: name of the data file
        :return: channel_id, or None if the filename is not associated
        """
        channel_ids = []
        channel_ids += [k for k in self.parameters_ncs if
                        self.parameters_ncs[k]['filename'] == filename]
        channel_ids += [k for k in self.parameters_nse if
                        self.parameters_nse[k]['filename'] == filename]
        channel_ids += [k for k in self.parameters_ntt if
                        self.parameters_ntt[k]['filename'] == filename]
        if len(np.unique(np.asarray(channel_ids))) == 1:
            return channel_ids[0]
        elif len(channel_ids) > 1:
            raise ValueError(
                'Ambiguous channel ids detected. Filename %s is associated '
                'with different channels of NCS, NSE and NTT: %s'
                % (filename, channel_ids))
        else:  # the filename was not detected
            return None
    def hashfile(self, afile, hasher, blocksize=65536):
        buf = afile.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(blocksize)
        return hasher.digest()

    def datesizefile(self, filename):
        return str(os.path.getmtime(filename)) + '_' + str(
            os.path.getsize(filename))
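    # datesizefile returns a fingerprint like '1478772367.0_1048576'
    # (modification time and size joined by '_'), which is cheaper to compute
    # than a sha256 digest but misses changes that preserve both values.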
    def _diagnostic_print(self, text):
        '''
        Print a diagnostic message.

        Args:
            text (string):
                Diagnostic text to print.

        Returns:
            -
        '''
        if self._print_diagnostic:
            print('NeuralynxIO: ' + text)