asciispiketrainio.py
  1. # -*- coding: utf-8 -*-
  2. """
  3. Classe for reading/writing SpikeTrains in a text file.
  4. It is the simple case where different spiketrains are written line by line.
  5. Supported : Read/Write
  6. Author: sgarcia
  7. """
  8. import os
  9. import numpy as np
  10. import quantities as pq
  11. from neo.io.baseio import BaseIO
  12. from neo.core import Segment, SpikeTrain
  13. class AsciiSpikeTrainIO(BaseIO):
  14. """
  15. Class for reading/writing SpikeTrains in a text file.
  16. Each Spiketrain is a line.
  17. Usage:
  18. >>> from neo import io
  19. >>> r = io.AsciiSpikeTrainIO( filename = 'File_ascii_spiketrain_1.txt')
  20. >>> seg = r.read_segment()
  21. >>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
  22. [<SpikeTrain(array([ 3.89981604, 4.73258781, 0.608428 , 4.60246277, 1.23805797,
  23. ...
  24. """
  25. is_readable = True
  26. is_writable = True
  27. supported_objects = [Segment, SpikeTrain]
  28. readable_objects = [Segment]
  29. writeable_objects = [Segment]
  30. has_header = False
  31. is_streameable = False
  32. read_params = {
  33. Segment: [
  34. ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
  35. ('t_start', {'value': 0., }),
  36. ]
  37. }
  38. write_params = {
  39. Segment: [
  40. ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
  41. ]
  42. }
  43. name = None
  44. extensions = ['txt']
  45. mode = 'file'
  46. def __init__(self, filename=None):
  47. """
  48. This class read/write SpikeTrains in a text file.
  49. Each row is a spiketrain.
  50. **Arguments**
  51. filename : the filename to read/write
  52. """
  53. BaseIO.__init__(self)
  54. self.filename = filename
  55. def read_segment(self,
  56. lazy=False,
  57. delimiter='\t',
  58. t_start=0. * pq.s,
  59. unit=pq.s,
  60. ):
  61. """
  62. Arguments:
  63. delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
  64. t_start : time start of all spiketrain 0 by default
  65. unit : unit of spike times, can be a str or directly a Quantities
  66. """
  67. assert not lazy, 'Do not support lazy'
  68. unit = pq.Quantity(1, unit)
  69. seg = Segment(file_origin=os.path.basename(self.filename))
  70. f = open(self.filename, 'Ur')
  71. for i, line in enumerate(f):
  72. alldata = line[:-1].split(delimiter)
  73. if alldata[-1] == '':
  74. alldata = alldata[:-1]
  75. if alldata[0] == '':
  76. alldata = alldata[1:]
  77. spike_times = np.array(alldata).astype('f')
  78. t_stop = spike_times.max() * unit
  79. sptr = SpikeTrain(spike_times * unit, t_start=t_start, t_stop=t_stop)
  80. sptr.annotate(channel_index=i)
  81. seg.spiketrains.append(sptr)
  82. f.close()
  83. seg.create_many_to_one_relationship()
  84. return seg
  85. def write_segment(self, segment,
  86. delimiter='\t',
  87. ):
  88. """
  89. Write SpikeTrain of a Segment in a txt file.
  90. Each row is a spiketrain.
  91. Arguments:
  92. segment : the segment to write. Only analog signals will be written.
  93. delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
  94. information of t_start is lost
  95. """
  96. f = open(self.filename, 'w')
  97. for s, sptr in enumerate(segment.spiketrains):
  98. for ts in sptr:
  99. f.write('%f%s' % (ts, delimiter))
  100. f.write('\n')
  101. f.close()