
Update to neo 0.9.0 and elephant 0.9.0

Julia Sprenger · 3 years ago
commit ec5662cdd9
100 changed files with 93 additions and 14971 deletions
  1. +0 -86  code/elephant/.gitignore
  2. +0 -83  code/elephant/.travis.yml
  3. +1 -0  code/elephant/CITATION.txt
  4. +1 -10  code/elephant/LICENSE.txt
  5. +1 -18  code/elephant/MANIFEST.in
  6. +1 -0  code/elephant/PKG-INFO
  7. +1 -0  code/elephant/README.md
  8. +1 -153  code/elephant/doc/Makefile
  9. +1 -0  code/elephant/doc/_templates/autosummary/class.rst
  10. +1 -0  code/elephant/doc/acknowledgments.rst
  11. +1 -68  code/elephant/doc/authors.rst
  12. +1 -0  code/elephant/doc/bib/elephant.bib
  13. +1 -0  code/elephant/doc/citation.rst
  14. +1 -367  code/elephant/doc/conf.py
  15. +1 -75  code/elephant/doc/developers_guide.rst
  16. +1 -0  code/elephant/doc/documentation_guide.rst
  17. +1 -0  code/elephant/doc/get_in_touch.rst
  18. BIN  code/elephant/doc/images/elephant_favicon.ico
  19. BIN  code/elephant/doc/images/elephant_logo.png
  20. BIN  code/elephant/doc/images/elephant_logo_sidebar.png
  21. +1 -58  code/elephant/doc/index.rst
  22. +1 -136  code/elephant/doc/install.rst
  23. +1 -0  code/elephant/doc/maintainers_guide.rst
  24. +1 -190  code/elephant/doc/make.bat
  25. +1 -35  code/elephant/doc/modules.rst
  26. +1 -0  code/elephant/doc/reference/_spike_train_processing.rst
  27. +1 -14  code/elephant/doc/reference/asset.rst
  28. +1 -0  code/elephant/doc/reference/causality.rst
  29. +1 -0  code/elephant/doc/reference/cell_assembly_detection.rst
  30. +1 -0  code/elephant/doc/reference/change_point_detection.rst
  31. +1 -6  code/elephant/doc/reference/conversion.rst
  32. +1 -6  code/elephant/doc/reference/cubic.rst
  33. +1 -6  code/elephant/doc/reference/current_source_density.rst
  34. +1 -0  code/elephant/doc/reference/gpfa.rst
  35. +1 -5  code/elephant/doc/reference/kernels.rst
  36. +1 -6  code/elephant/doc/reference/neo_tools.rst
  37. +1 -6  code/elephant/doc/reference/pandas_bridge.rst
  38. +1 -0  code/elephant/doc/reference/parallel.rst
  39. +1 -0  code/elephant/doc/reference/phase_analysis.rst
  40. +1 -13  code/elephant/doc/reference/signal_processing.rst
  41. +1 -0  code/elephant/doc/reference/spade.rst
  42. +1 -6  code/elephant/doc/reference/spectral.rst
  43. +1 -13  code/elephant/doc/reference/spike_train_correlation.rst
  44. +1 -8  code/elephant/doc/reference/spike_train_dissimilarity.rst
  45. +1 -11  code/elephant/doc/reference/spike_train_generation.rst
  46. +1 -12  code/elephant/doc/reference/spike_train_surrogates.rst
  47. +1 -0  code/elephant/doc/reference/spike_train_synchrony.rst
  48. +1 -6  code/elephant/doc/reference/sta.rst
  49. +1 -5  code/elephant/doc/reference/statistics.rst
  50. +1 -0  code/elephant/doc/reference/toctree/kernels/elephant.kernels.AlphaKernel.rst
  51. +1 -0  code/elephant/doc/reference/toctree/kernels/elephant.kernels.EpanechnikovLikeKernel.rst
  52. +1 -0  code/elephant/doc/reference/toctree/kernels/elephant.kernels.ExponentialKernel.rst
  53. +1 -0  code/elephant/doc/reference/toctree/kernels/elephant.kernels.GaussianKernel.rst
  54. +1 -0  code/elephant/doc/reference/toctree/kernels/elephant.kernels.LaplacianKernel.rst
  55. +1 -0  code/elephant/doc/reference/toctree/kernels/elephant.kernels.RectangularKernel.rst
  56. +1 -0  code/elephant/doc/reference/toctree/kernels/elephant.kernels.TriangularKernel.rst
  57. +1 -23  code/elephant/doc/reference/unitary_event_analysis.rst
  58. +1 -0  code/elephant/doc/reference/waveform_features.rst
  59. +1 -334  code/elephant/doc/release_notes.rst
  60. +1 -0  code/elephant/doc/style_guide.rst
  61. +1 -0  code/elephant/doc/tutorials.rst
  62. +1 -0  code/elephant/doc/tutorials/asset.ipynb
  63. +1 -0  code/elephant/doc/tutorials/asset_showcase_500.nix
  64. +1 -0  code/elephant/doc/tutorials/gpfa.ipynb
  65. +1 -0  code/elephant/doc/tutorials/parallel.ipynb
  66. +1 -0  code/elephant/doc/tutorials/statistics.ipynb
  67. +1 -0  code/elephant/doc/tutorials/unitary_event_analysis.ipynb
  68. +1 -0  code/elephant/elephant/VERSION
  69. +1 -50  code/elephant/elephant/__init__.py
  70. +1 -1936  code/elephant/elephant/asset.py
  71. +1 -0  code/elephant/elephant/causality/__init__.py
  72. +1 -0  code/elephant/elephant/causality/granger.py
  73. +1 -1210  code/elephant/elephant/cell_assembly_detection.py
  74. +1 -503  code/elephant/elephant/change_point_detection.py
  75. +1 -1073  code/elephant/elephant/conversion.py
  76. +1 -223  code/elephant/elephant/cubic.py
  77. +1 -348  code/elephant/elephant/current_source_density.py
  78. +1 -1059  code/elephant/elephant/current_source_density_src/KCSD.py
  79. +1 -93  code/elephant/elephant/current_source_density_src/README.md
  80. +1 -3  code/elephant/elephant/current_source_density_src/__init__.py
  81. +1 -201  code/elephant/elephant/current_source_density_src/basis_functions.py
  82. +1 -887  code/elephant/elephant/current_source_density_src/icsd.py
  83. BIN  code/elephant/elephant/current_source_density_src/test_data.mat
  84. +1 -364  code/elephant/elephant/current_source_density_src/utility_functions.py
  85. +1 -0  code/elephant/elephant/gpfa/__init__.py
  86. +1 -0  code/elephant/elephant/gpfa/gpfa.py
  87. +1 -0  code/elephant/elephant/gpfa/gpfa_core.py
  88. +1 -0  code/elephant/elephant/gpfa/gpfa_util.py
  89. +1 -913  code/elephant/elephant/kernels.py
  90. +1 -228  code/elephant/elephant/neo_tools.py
  91. +1 -618  code/elephant/elephant/pandas_bridge.py
  92. +1 -0  code/elephant/elephant/parallel/__init__.py
  93. +1 -0  code/elephant/elephant/parallel/mpi.py
  94. +1 -0  code/elephant/elephant/parallel/parallel.py
  95. +1 -192  code/elephant/elephant/phase_analysis.py
  96. +1 -951  code/elephant/elephant/signal_processing.py
  97. +1 -2349  code/elephant/elephant/spade.py
  98. +1 -11  code/elephant/elephant/spade_src/LICENSE
  99. +1 -0  code/elephant/elephant/spade_src/__init__.py
  100. +0 -0  code/elephant/elephant/spade_src/fast_fca.py

+ 0 - 86
code/elephant/.gitignore

@@ -1,86 +0,0 @@
-#########################################
-# Editor temporary/working/backup files #
-.#*
-[#]*#
-*~
-*$
-*.bak
-.coverage
-*.kdev4
-*.komodoproject
-.mr.developer.cfg
-nosetests.xml
-*.orig
-.project
-.pydevproject
-.settings
-*.tmp*
-.idea/
-venv/
-env/
-.pytest_cache/
-
-# Compiled source #
-###################
-*.a
-*.com
-*.class
-*.dll
-*.exe
-*.mo
-*.o
-*.py[ocd]
-*.so
-
-# Python files #
-################
-# setup.py working directory
-build
-# other build directories
-bin
-parts
-var
-lib
-lib64
-# sphinx build directory
-doc/_build
-doc/reference/toctree/*
-!doc/reference/toctree/asset/elephant.asset.ASSET.rst
-!doc/reference/toctree/kernels
-*.h5
-# setup.py dist directory
-dist
-sdist
-# Egg metadata
-*.egg-info
-*.egg
-*.EGG
-*.EGG-INFO
-eggs
-develop-eggs
-# tox testing tool
-.tox
-# Packages
-.installed.cfg
-pip-log.txt
-# coverage
-cover
-
-# OS generated files #
-######################
-.directory
-.gdb_history
-.DS_Store
-ehthumbs.db
-Icon?
-Thumbs.db
-
-# Things specific to this project #
-###################################
-# ignored folder for fast prototyping
-ignored/
-
-.ipynb_checkpoints/
-
-# data
-*.nix

+ 0 - 83
code/elephant/.travis.yml

@@ -1,83 +0,0 @@
-dist: xenial
-language: python
-sudo: false
-
-addons:
-   apt:
-     update: true
-
-
-matrix:
-  include:
-    - name: "conda 2.7"
-      python: 2.7
-      env: DISTRIB="conda"
-      before_install: sed -i 's/conda-forge/conda/g' requirements/environment.yml
-
-    - name: "pip 2.7"
-      python: 2.7
-      env: DISTRIB="pip"
-
-    - name: "pip 3.5"
-      python: 3.5
-      env: DISTRIB="pip"
-
-    - name: "pip 3.6 requirements-extras"
-      python: 3.6
-      env: DISTRIB="pip"
-      before_install: sudo apt install -y libopenmpi-dev openmpi-bin
-      before_script: pip install -r requirements/requirements-extras.txt
-      script: mpiexec -n 1 python -m mpi4py.futures -m nose --with-coverage --cover-package=elephant
-      after_success: coveralls || echo "coveralls failed"
-
-    - name: "conda 3.7"
-      python: 3.7
-      env: DISTRIB="conda"
-
-    - name: "conda 3.8"
-      python: 3.8
-      env: DISTRIB="conda"
-
-    - name: "pip 3.8"
-      python: 3.8
-      env: DISTRIB="pip"
-
-    - name: "docs"
-      python: 3.6
-      env: DISTRIB="conda"
-      before_install: sudo apt install -y libopenmpi-dev openmpi-bin
-      before_script:
-        - conda install -c conda-forge pandoc
-        - pip install -r requirements/requirements-docs.txt
-        - pip install -r requirements/requirements-tutorials.txt
-        - pip install -r requirements/requirements-extras.txt
-        - sed -i -E "s/nbsphinx_execute *=.*/nbsphinx_execute = 'always'/g" doc/conf.py
-      script: cd doc && make html
-
-install:
-  - if [[ "${DISTRIB}" == "conda" ]];
-    then
-      py_major=${TRAVIS_PYTHON_VERSION:0:1};
-      wget https://repo.continuum.io/miniconda/Miniconda${py_major}-latest-Linux-x86_64.sh -O miniconda.sh;
-      bash miniconda.sh -b -p $HOME/miniconda;
-      source "$HOME/miniconda/etc/profile.d/conda.sh";
-      conda config --set always_yes yes;
-      conda update conda;
-      sed -i "s/python>=[0-9]\.[0-9]/python=${TRAVIS_PYTHON_VERSION}/g" requirements/environment.yml;
-      conda env create -f requirements/environment.yml;
-      conda activate elephant;
-      conda uninstall -y mpi4py;
-      pip list;
-    else
-      pip install -r requirements/requirements.txt;
-    fi
-
-  - pip -V
-  - pip install coverage coveralls nose
-  - python setup.py install
-  - python -c "from elephant.spade import HAVE_FIM; assert HAVE_FIM"
-  - pip list
-  - python --version
-
-script:
-  nosetests --with-coverage --cover-package=elephant

+ 1 - 0
code/elephant/CITATION.txt

@@ -0,0 +1 @@
+/annex/objects/MD5-s150--406acd3d4c454eafea04e056a70e14b8
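
Note the pattern that repeats through the rest of this commit: each file's working-tree content is replaced by a single git-annex pointer line like the one above. The key encodes the hashing backend, the content size in bytes (the `s150` part), and the hash digest. Below is a minimal sketch of unpacking such a key, assuming only the `<BACKEND>-s<SIZE>--<DIGEST>` shape seen in this commit; `parse_annex_key` is a hypothetical helper for illustration, and for real use the `git annex` CLI (e.g. `git annex get <file>`) is the right tool:

    import re

    # Illustrative only: parse keys of the form "MD5-s150--406acd3d...",
    # as they appear in the pointer files of this commit.
    KEY_RE = re.compile(
        r'^(?P<backend>[A-Z0-9]+)-s(?P<size>\d+)--(?P<digest>[0-9a-f]+)$')

    def parse_annex_key(pointer):
        """Split an /annex/objects/ pointer into backend, size, and digest."""
        key = pointer.rsplit('/', 1)[-1]
        match = KEY_RE.match(key)
        if match is None:
            raise ValueError('not a recognised git-annex key: %r' % key)
        return {'backend': match.group('backend'),
                'size': int(match.group('size')),
                'digest': match.group('digest')}

    parse_annex_key('/annex/objects/MD5-s150--406acd3d4c454eafea04e056a70e14b8')
    # -> {'backend': 'MD5', 'size': 150, 'digest': '406acd3d4c454eafea04e056a70e14b8'}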

+ 1 - 10
code/elephant/LICENSE.txt

@@ -1,10 +1 @@
-Copyright (c) 2014-2019, Elephant authors and contributors
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-* Neither the names of the copyright holders nor the names of the contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/annex/objects/MD5-s1506--7a9d8755791957b707aa0a9e26faf593

+ 1 - 18
code/elephant/MANIFEST.in

@@ -1,18 +1 @@
-recursive-include elephant *.py
-include requirements/*
-include README.md
-include LICENSE.txt
-include CITATION.txt
-include elephant/VERSION
-include elephant/current_source_density_src/README.md
-include elephant/current_source_density_src/test_data.mat
-include elephant/spade_src/LICENSE
-recursive-include elephant/spade_src *.so *.pyd
-include elephant/test/spike_extraction_test_data.txt
-recursive-include doc *
-prune doc/_build
-prune doc/tutorials/.ipynb_checkpoints
-prune doc/reference/toctree
-include doc/reference/toctree/kernels/*
-recursive-exclude * *.h5
-recursive-exclude * *~
+/annex/objects/MD5-s583--52fb473cd423b20363501db96b01d042

+ 1 - 0
code/elephant/PKG-INFO

@@ -0,0 +1 @@
+/annex/objects/MD5-s2925--ab354f6fa0ea8caa0a76b6107c79f03e

+ 1 - 0
code/elephant/README.md

@@ -0,0 +1 @@
+/annex/objects/MD5-s1770--357e859d4bb2713c06ddf80b40373d35

+ 1 - 153
code/elephant/doc/Makefile

@@ -1,153 +1 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = _build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Elephant.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Elephant.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/Elephant"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Elephant"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo
-	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
-	@echo "Run \`make' in that directory to run these through makeinfo" \
-	      "(use \`make info' here to do that automatically)."
-
-info:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo "Running Texinfo files through makeinfo..."
-	make -C $(BUILDDIR)/texinfo info
-	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
-	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
-	@echo
-	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
+/annex/objects/MD5-s5572--c4a12f8b6ba74cceea47e8a9fd972340

+ 1 - 0
code/elephant/doc/_templates/autosummary/class.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s590--725730472bef3c901ac3d2766d3f7945

+ 1 - 0
code/elephant/doc/acknowledgments.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s351--ebfb71a1bf24b00d2557d0159673b6a4

+ 1 - 68
code/elephant/doc/authors.rst

@@ -1,68 +1 @@
-.. _authors:
-
-************************
-Authors and contributors
-************************
-
-The following people have contributed code and/or ideas to the current version
-of Elephant. The institutional affiliations are those at the time of the
-contribution, and may not be the current affiliation of a contributor.
-
-Do you want to contribute to Elephant? Please refer to the
-:ref:`developers_guide`.
-
-* Alper Yegenoglu [1]
-* Andrew Davison [2]
-* Björn Müller [1]
-* Detlef Holstein [2]
-* Eilif Muller [3, 4]
-* Emiliano Torre [1]
-* Espen Hagen [1]
-* Jeffrey Gill [11]
-* Jan Gosmann [6, 8]
-* Julia Sprenger [1]
-* Junji Ito [1]
-* Michael Denker [1]
-* Paul Chorley [1]
-* Pierre Yger [2]
-* Pietro Quaglio [1]
-* Richard Meyes [1]
-* Vahid Rostami [1]
-* Subhasis Ray [5]
-* Robert Pröpper [6]
-* Richard C Gerkin [7]
-* Bartosz Telenczuk [2]
-* Chaitanya Chintaluri [9]
-* Michał Czerwiński [9]
-* Michael von Papen [1]
-* Robin Gutzen [1]
-* Felipe Méndez [10]
-* Simon Essink [1]
-* Alessandra Stella [1]
-* Peter Bouss [1]
-* Alexander van Meegen [1]
-* Aitor Morales-Gregorio [1]
-* Cristiano Köhler [1]
-* Paulina Dąbrowska [1]
-* Jan Lewen [1]
-* Alexander Kleinjohann [1]
-* Danylo Ulianych [1]
-* Anno Kurth [1]
-* Regimantas Jurkus [1]
-* Philipp Steigerwald [12]
-* Manuel Ciba [12]
-
-1. Institute of Neuroscience and Medicine (INM-6), Computational and Systems Neuroscience & Institute for Advanced Simulation (IAS-6), Theoretical Neuroscience, Jülich Research Centre and JARA, Jülich, Germany
-2. Unité de Neurosciences, Information et Complexité, CNRS UPR 3293, Gif-sur-Yvette, France
-3. Electronic Visions Group, Kirchhoff-Institute for Physics, University of Heidelberg, Germany
-4. Brain-Mind Institute, Ecole Polytechnique Fédérale de Lausanne, Switzerland
-5. NIH–NICHD, Laboratory of Cellular and Synaptic Physiology, Bethesda, Maryland 20892, USA
-6. Neural Information Processing Group, Institute of Software Engineering and Theoretical Computer Science, Technische Universität Berlin, Germany
-7. Arizona State University School of Life Sciences, USA
-8. Computational Neuroscience Research Group (CNRG), Waterloo Centre for Theoretical Neuroscience, Waterloo, Canada
-9. Nencki Institute of Experimental Biology, Warsaw, Poland
-10. Instituto de Neurobiología, Universidad Nacional Autónoma de México, Mexico City, Mexico
-11. Case Western Reserve University (CWRU), Cleveland, OH, USA
-12. BioMEMS Lab, TH Aschaffenburg University of applied sciences, Germany
-
-If we've somehow missed you off the list we're very sorry - please let us know.
+/annex/objects/MD5-s2541--2d51cc6845fad7510f6ed8b7d8146b21

+ 1 - 0
code/elephant/doc/bib/elephant.bib

@@ -0,0 +1 @@
+/annex/objects/MD5-s4032--9b892b5b2c865d8cfb12d21017b9f7ad

+ 1 - 0
code/elephant/doc/citation.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s1197--32bed887c47c0d826c404f45f7572698

+ 1 - 367
code/elephant/doc/conf.py

@@ -1,367 +1 @@
-# -*- coding: utf-8 -*-
-#
-# Elephant documentation build configuration file, created by
-# sphinx-quickstart on Wed Feb  5 17:11:26 2014.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import os
-import sys
-from datetime import date
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, '..')
-
-# -- General configuration -----------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.autosummary',
-    'sphinx.ext.doctest',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.todo',
-    'sphinx.ext.imgmath',
-    'sphinx.ext.viewcode',
-    'sphinx.ext.mathjax',
-    'sphinxcontrib.bibtex',
-    'matplotlib.sphinxext.plot_directive',
-    'numpydoc',
-    'nbsphinx',
-    'sphinx_tabs.tabs',
-]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Elephant'
-authors = u'Elephant authors and contributors'
-copyright = u"2014-{this_year}, {authors}".format(this_year=date.today().year,
-                                                  authors=authors)
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-
-root_dir = os.path.dirname(os.path.dirname(__file__))
-with open(os.path.join(root_dir, 'elephant', 'VERSION')) as version_file:
-    # The full version, including alpha/beta/rc tags.
-    release = version_file.read().strip()
-
-# The short X.Y version.
-version = '.'.join(release.split('.')[:-1])
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = [
-    '_build',
-    '**.ipynb_checkpoints',
-    'maintainers_guide.rst',  # should not be visible for users
-]
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-# Only execute Jupyter notebooks that have no evaluated cells
-nbsphinx_execute = 'auto'
-# Kernel to use for execution
-nbsphinx_kernel_name = 'python3'
-# Cancel compile on errors in notebooks
-nbsphinx_allow_errors = False
-
-# Required to automatically create a summary page for each function listed in
-# the autosummary fields of each module.
-autosummary_generate = True
-
-# don't overwrite our custom toctree/*.rst
-autosummary_generate_overwrite = False
-
-# -- Options for HTML output ---------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-html_theme = 'alabaster'
-html_theme_options = {
-    'font_family': 'Arial',
-    'page_width': '1200px',  # default is 940
-    'sidebar_width': '280px',  # default is 220
-}
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-html_logo = 'images/elephant_logo_sidebar.png'
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-html_favicon = 'images/elephant_favicon.ico'
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-# html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-html_show_sphinx = False
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'elephantdoc'
-
-# Suppresses spurious numpydoc warnings
-# see here https://github.com/phn/pytpm/issues/3#issuecomment-12133978
-numpydoc_show_class_members = False
-
-# A fix for Alabaster theme for no space between a citation reference
-# and citation text
-# https://github.com/sphinx-doc/sphinx/issues/6705#issuecomment-536197438
-html4_writer = True
-
-
-# -- Options for LaTeX output --------------------------------------------
-
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    # 'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    # 'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    # 'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-    ('index', 'elephant.tex', u'Elephant Documentation',
-     authors, 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'elephant', u'Elephant Documentation',
-     [authors], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output ------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-    ('index',
-     'Elephant',
-     u'Elephant Documentation',
-     authors,
-     'Elephant',
-     'Elephant is a package for the analysis of neurophysiology data.',
-     'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
-
-
-# -- Options for Epub output ---------------------------------------------
-
-# Bibliographic Dublin Core info.
-epub_title = project
-epub_author = authors
-epub_publisher = authors
-epub_copyright = copyright
-
-# The language of the text. It defaults to the language option
-# or en if the language is not set.
-#epub_language = ''
-
-# The scheme of the identifier. Typical schemes are ISBN or URL.
-#epub_scheme = ''
-
-# The unique identifier of the text. This can be a ISBN number
-# or the project homepage.
-#epub_identifier = ''
-
-# A unique identification for the text.
-#epub_uid = ''
-
-# A tuple containing the cover image and cover page html template filenames.
-#epub_cover = ()
-
-# HTML files that should be inserted before the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_pre_files = []
-
-# HTML files that should be inserted after the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_post_files = []
-
-# A list of files that should not be packed into the epub file.
-#epub_exclude_files = []
-
-# The depth of the table of contents in toc.ncx.
-#epub_tocdepth = 3
-
-# Allow duplicate toc entries.
-#epub_tocdup = True
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
-
-# Use more reliable mathjax source
-mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
-
-# Remove the copyright notice from docstrings:
-
-
-def process_docstring_remove_copyright(app, what, name, obj, options, lines):
-    copyright_line = None
-    for i, line in enumerate(lines):
-        if line.startswith(':copyright:'):
-            copyright_line = i
-            break
-    if copyright_line:
-        while len(lines) > copyright_line:
-            lines.pop()
-
-
-def setup(app):
-    app.connect('autodoc-process-docstring',
-                process_docstring_remove_copyright)
+/annex/objects/MD5-s11529--919894abd745a96e9811f70e42bfd109
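
A side note on the removed `conf.py` above: in `process_docstring_remove_copyright`, the guard `if copyright_line:` is falsy for index 0, so a docstring whose very first line starts with `:copyright:` would not be truncated. A corrected sketch of the same hook, assuming the intent is to drop everything from the first `:copyright:` line onward (this fix is not part of this commit):

    def process_docstring_remove_copyright(app, what, name, obj, options, lines):
        # Truncate the docstring at the first ':copyright:' line.
        copyright_line = None
        for i, line in enumerate(lines):
            if line.startswith(':copyright:'):
                copyright_line = i
                break
        # Compare against None: a match on the very first line has index 0,
        # which the original truthiness test would silently skip.
        if copyright_line is not None:
            del lines[copyright_line:]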

+ 1 - 75
code/elephant/doc/developers_guide.rst

@@ -1,75 +1 @@
-.. _developers_guide:
-
-=================
-Developers' Guide
-=================
-
-.. note:: The documentation guide (how to write a good documentation, naming
-          conventions, docstring examples) is in :ref:`documentation_guide`.
-
-
-1. Follow the instructions in :ref:`prerequisites` to setup a clean conda
-   environment. To be safe, run::
-
-    $ pip uninstall elephant
-
-   to uninstall ``elephant`` in case you've installed it previously as a pip
-   package.
-
-2. Fork `Elephant <https://github.com/NeuralEnsemble/elephant>`_ as described
-   in `Fork a repo <https://help.github.com/en/github/getting-started-with-github/fork-a-repo>`_.
-   Download Elephant source code from your forked repo::
-
-    $ git clone git://github.com/<your-github-profile>/elephant.git
-    $ cd elephant
-
-3. Install requirements.txt, (optionally) requirements-extras.txt, and
-   requirements-tests.txt::
-
-    $ pip install -r requirements/requirements.txt
-    $ pip install -r requirements/requirements-extras.txt  # optional
-    $ pip install -r requirements/requirements-tests.txt
-
-4. Before you make any changes, run the test suite to make sure all the tests
-   pass on your system::
-
-    $ nosetests .
-
-   You can specify a particular module to test, for example
-   ``test_statistics.py``::
-
-    $ nosetests elephant/test/test_statistics.py
-
-   At the end, if you see "OK", then all the tests passed (or were skipped
-   because certain dependencies are not installed), otherwise it will report
-   on tests that failed or produced errors.
-
-5. **Implement the functionality you want to add to Elephant**. This can be
-   any of the following:
-
-   * fixing a bug;
-   * improving the documentation;
-   * adding new functionality.
-
-6. If you added new functionality, please write:
-
-   - documentation (refer to :ref:`documentation_guide`);
-   - tests to cover your new functions as much as possible.
-
-7. Run the tests again as described in step 4.
-
-8. Commit your changes::
-
-    $ git add .
-    $ git commit -m "informative commit message"
-    $ git push
-
-   If this is your first commit to the project, please add your name and
-   affiliation/employer to :file:`doc/authors.rst`
-
-9. Open a `pull request <https://github.com/NeuralEnsemble/elephant/pulls>`_.
-   Then we'll merge your code in Elephant.
-
-
-.. note:: If you experience a problem during one of the steps above, please
-          contact us by :ref:`get_in_touch`.
+/annex/objects/MD5-s2966--7c744f8573f853f259e70b75e6fddee8

+ 1 - 0
code/elephant/doc/documentation_guide.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s1248--8f148a9e475a7a821d013cbb10970501

+ 1 - 0
code/elephant/doc/get_in_touch.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s1075--5462185ac556605e4e053b5d0165db73

BIN
code/elephant/doc/images/elephant_favicon.ico


BIN
code/elephant/doc/images/elephant_logo.png


BIN
code/elephant/doc/images/elephant_logo_sidebar.png


+ 1 - 58
code/elephant/doc/index.rst

@@ -1,58 +1 @@
-*********************************************
-Elephant - Electrophysiology Analysis Toolkit
-*********************************************
-
-*Elephant* (Electrophysiology Analysis Toolkit) is an emerging open-source,
-community centered library for the analysis of electrophysiological data in
-the Python programming language.
-
-The focus of Elephant is on generic analysis functions for spike train data and
-time series recordings from electrodes, such as the local field potentials
-(LFP) or intracellular voltages. In addition to providing a common platform for
-analysis codes from different laboratories, the Elephant project aims to
-provide a consistent and homogeneous analysis framework that is built on a
-modular foundation. Elephant is the direct successor to Neurotools_ and
-maintains ties to complementary projects such as ephyviewer_ and
-neurotic_ for raw data visualization.
-
-The input-output data format is either Neo_, Quantity_ or Numpy_ array.
-Quantity is a Numpy-wrapper package for handling physical quantities like
-seconds, milliseconds, Hz, volts, etc. Quantity is used in both Neo and
-Elephant.
-
-Table of Contents
------------------
-
-.. toctree::
-    :maxdepth: 1
-
-    install
-    tutorials
-    modules
-    developers_guide
-    authors
-    release_notes
-    get_in_touch
-    acknowledgments
-    citation
-
-
-
-.. Indices and tables
-.. ==================
-
-.. * :ref:`genindex`
-.. * :ref:`modindex`
-.. * :ref:`search`
-
-
-.. _Neurotools:  http://neuralensemble.org/NeuroTools/
-.. _ephyviewer:  https://ephyviewer.readthedocs.io/en/latest/
-.. _neurotic:  https://neurotic.readthedocs.io/en/latest/
-.. _Neo: http://neuralensemble.org/neo/
-.. _Numpy: http://www.numpy.org/
-.. _Quantity: https://python-quantities.readthedocs.io/en/latest/
-
-
-.. |date| date::
-.. |time| date:: %H:%M
+/annex/objects/MD5-s2195--f3ef8b961d5a06b3c54e8612816918ed

+ 1 - 136
code/elephant/doc/install.rst

@@ -1,136 +1 @@
-.. _install:
-
-************
-Installation
-************
-
-The easiest way to install Elephant is by creating a conda environment, followed by ``pip install elephant``.
-Below is the explanation of how to proceed with these two steps.
-
-
-.. _prerequisites:
-
-Prerequisites
-=============
-
-Elephant requires Python_ 2.7, 3.5, 3.6, 3.7, or 3.8.
-
-.. tabs::
-
-
-    .. tab:: (recommended) Conda (Linux/MacOS/Windows)
-
-        1. Create your conda environment (e.g., `elephant_env`):
-
-           .. code-block:: sh
-
-              conda create --name elephant_env python=3.7 numpy scipy tqdm
-
-        2. Activate your environment:
-
-           .. code-block:: sh
-
-              conda activate elephant_env
-
-
-    .. tab:: Debian/Ubuntu
-
-        Open a terminal and run:
-
-        .. code-block:: sh
-
-           sudo apt-get install python-pip python-numpy python-scipy python-six python-tqdm
-
-
-
-Installation
-============
-
-.. tabs::
-
-
-    .. tab:: Stable release version
-
-        The easiest way to install Elephant is via pip_:
-
-           .. code-block:: sh
-
-              pip install elephant
-
-        To upgrade to a newer release use the ``--upgrade`` flag:
-
-           .. code-block:: sh
-
-              pip install --upgrade elephant
-
-        If you do not have permission to install software systemwide, you can
-        install into your user directory using the ``--user`` flag:
-
-           .. code-block:: sh
-
-              pip install --user elephant
-
-        To install Elephant with all extra packages, do:
-
-           .. code-block:: sh
-
-              pip install elephant[extras]
-
-
-    .. tab:: Development version
-
-        If you have `Git <https://git-scm.com/>`_ installed on your system,
-        it is also possible to install the development version of Elephant.
-
-        1. Before installing the development version, you may need to uninstall
-           the previously installed version of Elephant:
-
-           .. code-block:: sh
-
-              pip uninstall elephant
-
-        2. Clone the repository and install the local version:
-
-           .. code-block:: sh
-
-              git clone git://github.com/NeuralEnsemble/elephant.git
-              cd elephant
-              pip install -e .
-
-
-
-Dependencies
-------------
-
-The following packages are required to use Elephant (refer to requirements_ for the exact package versions):
-
-    * numpy_ - fast array computations
-    * scipy_ - scientific library for Python
-    * quantities_ - support for physical quantities with units (mV, ms, etc.)
-    * neo_ - electrophysiology data manipulations
-    * tqdm_ - progress bar
-    * six_ - Python 2 and 3 compatibility utilities
-
-These packages are automatically installed when you run ``pip install elephant``.
-
-The following packages are optional in order to run certain parts of Elephant:
-
-    * `pandas <https://pypi.org/project/pandas/>`_ - for the :doc:`pandas_bridge <reference/pandas_bridge>` module
-    * `scikit-learn <https://pypi.org/project/scikit-learn/>`_ - for the :doc:`ASSET <reference/asset>` analysis
-    * `nose <https://pypi.org/project/nose/>`_ - for running tests
-    * `numpydoc <https://pypi.org/project/numpydoc/>`_ and `sphinx <https://pypi.org/project/Sphinx/>`_ - for building the documentation
-
-These and above packages are automatically installed when you run ``pip install elephant[extras]``.
-
-.. _`Python`: http://python.org/
-.. _`numpy`: http://www.numpy.org/
-.. _`scipy`: https://www.scipy.org/
-.. _`quantities`: http://pypi.python.org/pypi/quantities
-.. _`neo`: http://pypi.python.org/pypi/neo
-.. _`pip`: http://pypi.python.org/pypi/pip
-.. _Anaconda: https://docs.anaconda.com/anaconda/install/
-.. _`Conda environment`: https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html
-.. _`tqdm`: https://pypi.org/project/tqdm/
-.. _`six`: https://pypi.org/project/six/
-.. _requirements: https://github.com/NeuralEnsemble/elephant/blob/master/requirements/requirements.txt
-.. _PyPI: https://pypi.org/
+/annex/objects/MD5-s3780--24de5e0d1dc3d2d73488afab86f3418f

+ 1 - 0
code/elephant/doc/maintainers_guide.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s4780--ba87736b2da033c21f9461c2c58a7dc3

+ 1 - 190
code/elephant/doc/make.bat

@@ -1,190 +1 @@
-@ECHO OFF
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
-	set SPHINXBUILD=sphinx-build
-)
-set BUILDDIR=_build
-set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
-set I18NSPHINXOPTS=%SPHINXOPTS% .
-if NOT "%PAPER%" == "" (
-	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
-	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
-)
-
-if "%1" == "" goto help
-
-if "%1" == "help" (
-	:help
-	echo.Please use `make ^<target^>` where ^<target^> is one of
-	echo.  html       to make standalone HTML files
-	echo.  dirhtml    to make HTML files named index.html in directories
-	echo.  singlehtml to make a single large HTML file
-	echo.  pickle     to make pickle files
-	echo.  json       to make JSON files
-	echo.  htmlhelp   to make HTML files and a HTML help project
-	echo.  qthelp     to make HTML files and a qthelp project
-	echo.  devhelp    to make HTML files and a Devhelp project
-	echo.  epub       to make an epub
-	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
-	echo.  text       to make text files
-	echo.  man        to make manual pages
-	echo.  texinfo    to make Texinfo files
-	echo.  gettext    to make PO message catalogs
-	echo.  changes    to make an overview over all changed/added/deprecated items
-	echo.  linkcheck  to check all external links for integrity
-	echo.  doctest    to run all doctests embedded in the documentation if enabled
-	goto end
-)
-
-if "%1" == "clean" (
-	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
-	del /q /s %BUILDDIR%\*
-	goto end
-)
-
-if "%1" == "html" (
-	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
-	goto end
-)
-
-if "%1" == "dirhtml" (
-	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
-	goto end
-)
-
-if "%1" == "singlehtml" (
-	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
-	goto end
-)
-
-if "%1" == "pickle" (
-	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; now you can process the pickle files.
-	goto end
-)
-
-if "%1" == "json" (
-	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; now you can process the JSON files.
-	goto end
-)
-
-if "%1" == "htmlhelp" (
-	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; now you can run HTML Help Workshop with the ^
-.hhp project file in %BUILDDIR%/htmlhelp.
-	goto end
-)
-
-if "%1" == "qthelp" (
-	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; now you can run "qcollectiongenerator" with the ^
-.qhcp project file in %BUILDDIR%/qthelp, like this:
-	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Elephant.qhcp
-	echo.To view the help file:
-	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Elephant.qhc
-	goto end
-)
-
-if "%1" == "devhelp" (
-	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished.
-	goto end
-)
-
-if "%1" == "epub" (
-	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The epub file is in %BUILDDIR%/epub.
-	goto end
-)
-
-if "%1" == "latex" (
-	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
-	goto end
-)
-
-if "%1" == "text" (
-	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The text files are in %BUILDDIR%/text.
-	goto end
-)
-
-if "%1" == "man" (
-	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The manual pages are in %BUILDDIR%/man.
-	goto end
-)
-
-if "%1" == "texinfo" (
-	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
-	goto end
-)
-
-if "%1" == "gettext" (
-	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
-	goto end
-)
-
-if "%1" == "changes" (
-	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.The overview file is in %BUILDDIR%/changes.
-	goto end
-)
-
-if "%1" == "linkcheck" (
-	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Link check complete; look for any errors in the above output ^
-or in %BUILDDIR%/linkcheck/output.txt.
-	goto end
-)
-
-if "%1" == "doctest" (
-	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Testing of doctests in the sources finished, look at the ^
-results in %BUILDDIR%/doctest/output.txt.
-	goto end
-)
-
-:end
+/annex/objects/MD5-s5100--997eccc0d9764057341b3b340918e6f7

+ 1 - 35
code/elephant/doc/modules.rst

@@ -1,35 +1 @@
-****************************
-Function Reference by Module
-****************************
-
-.. toctree::
-   :maxdepth: 1
-
-   reference/asset
-   reference/causality
-   reference/cell_assembly_detection
-   reference/change_point_detection
-   reference/conversion
-   reference/cubic
-   reference/current_source_density
-   reference/gpfa
-   reference/kernels
-   reference/neo_tools
-   reference/pandas_bridge
-   reference/parallel
-   reference/phase_analysis
-   reference/signal_processing
-   reference/spade
-   reference/spectral
-   reference/spike_train_generation
-   reference/spike_train_surrogates
-   reference/sta
-   reference/statistics
-   reference/unitary_event_analysis
-   reference/waveform_features
-
-
-.. toctree::
-   :maxdepth: 2
-
-   reference/_spike_train_processing
+/annex/objects/MD5-s772--1580ace450f6439c202125e85cd7da07

+ 1 - 0
code/elephant/doc/reference/_spike_train_processing.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s182--82fa15302738cb8482ee11d3fcaae07c

+ 1 - 14
code/elephant/doc/reference/asset.rst

@@ -1,14 +1 @@
-===================================================
-Analysis of Sequences of Synchronous EvenTs (ASSET)
-===================================================
-
-.. automodule:: elephant.asset
-
-
-References
-----------
-
-.. bibliography:: ../bib/elephant.bib
-   :labelprefix: as
-   :keyprefix: asset-
-   :style: unsrt
+/annex/objects/MD5-s310--9be984cbafd3118658fcdd3e73742294

+ 1 - 0
code/elephant/doc/reference/causality.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s101--f0d20b4a0b246574e3636b87c855b57b

+ 1 - 0
code/elephant/doc/reference/cell_assembly_detection.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s159--5951921a7287b8c1860214cf5069e94d

+ 1 - 0
code/elephant/doc/reference/change_point_detection.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s176--d3d09a2d47c3c04c20082638f68c14b9

+ 1 - 6
code/elephant/doc/reference/conversion.rst

@@ -1,6 +1 @@
-=============================
-BinnedSpikeTrain (conversion)
-=============================
-
-.. automodule:: elephant.conversion
-   :members:
+/annex/objects/MD5-s127--8e466ef50d52a4e24dca9f83ce28d6de

+ 1 - 6
code/elephant/doc/reference/cubic.rst

@@ -1,6 +1 @@
-============================================================
-Cumulant Based Inference of higher-order Correlation (CuBIC)
-============================================================
-
-.. automodule:: elephant.cubic
-   :members:
+/annex/objects/MD5-s228--df0773b1559944480bc51f06348c3d91

+ 1 - 6
code/elephant/doc/reference/current_source_density.rst

@@ -1,6 +1 @@
-===============================
-Current source density analysis
-===============================
-
-.. automodule:: elephant.current_source_density
-   :members:
+/annex/objects/MD5-s158--8da51976bd0dad9aef5f4dfd893b5723

+ 1 - 0
code/elephant/doc/reference/gpfa.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s156--e1108114fac15e82227346f86b7fa85a

+ 1 - 5
code/elephant/doc/reference/kernels.rst

@@ -1,5 +1 @@
-=======
-Kernels
-=======
-
-.. automodule:: elephant.kernels
+/annex/objects/MD5-s58--9f6c75995fa82e8959bf80f91b3fe9f7

+ 1 - 6
code/elephant/doc/reference/neo_tools.rst

@@ -1,6 +1 @@
-=====================
-Neo objects utilities
-=====================
-
-.. automodule:: elephant.neo_tools
-   :members:
+/annex/objects/MD5-s115--838c933f8336e8466b20d0e085564c66

+ 1 - 6
code/elephant/doc/reference/pandas_bridge.rst

@@ -1,6 +1 @@
-============================
-Bridge to the pandas library
-============================
-
-.. automodule:: elephant.pandas_bridge
-   :members:
+/annex/objects/MD5-s140--b41b0dbfc4236d9bac78443367c48f68

+ 1 - 0
code/elephant/doc/reference/parallel.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s62--1c6368f16636e8e23b6c86ded8ef1d73

+ 1 - 0
code/elephant/doc/reference/phase_analysis.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s132--b82728acc4c9f495d14691c99c3ab3f1

+ 1 - 13
code/elephant/doc/reference/signal_processing.rst

@@ -1,13 +1 @@
-=================
-Signal processing
-=================
-
-.. testsetup::
-
-   import numpy as np
-   from quantities import mV, s, Hz
-   import neo
-   from elephant.signal_processing import zscore
-
-.. automodule:: elephant.signal_processing
-   :members:
+/annex/objects/MD5-s249--c65b01e17907309b6aae8ff4866afa94

+ 1 - 0
code/elephant/doc/reference/spade.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s173--3483035bed5ceafaf6d2a7a8ec4d377f

+ 1 - 6
code/elephant/doc/reference/spectral.rst

@@ -1,6 +1 @@
-=================
-Spectral analysis
-=================
-
-.. automodule:: elephant.spectral
-   :members:
+/annex/objects/MD5-s102--d66cc3efdef18856eb59bcbf78fcf50e

+ 1 - 13
code/elephant/doc/reference/spike_train_correlation.rst

@@ -1,13 +1 @@
-=======================
-Spike train correlation
-=======================
-
-.. testsetup::
-
-   from quantities import Hz, s, ms
-   from elephant.spike_train_correlation import corrcoef
-
-
-.. automodule:: elephant.spike_train_correlation
-   :members:
-   :exclude-members: cch, sttc
+/annex/objects/MD5-s233--5310d034effd9b19f213699c5751d780

+ 1 - 8
code/elephant/doc/reference/spike_train_dissimilarity.rst

@@ -1,8 +1 @@
-=========================
-Spike train dissimilarity
-=========================
-
-
-.. automodule:: elephant.spike_train_dissimilarity
-   :members:
-
+/annex/objects/MD5-s131--dbd61fc54190a2661643c6658747ee3e

+ 1 - 11
code/elephant/doc/reference/spike_train_generation.rst

@@ -1,11 +1 @@
-=================================
-Stochastic spike train generation
-=================================
-
-.. testsetup::
-
-   from elephant.spike_train_generation import homogeneous_poisson_process, homogeneous_gamma_process
-
-
-.. automodule:: elephant.spike_train_generation
-   :members:
+/annex/objects/MD5-s284--70fbd440defb172755b4b2f26a1768b6

+ 1 - 12
code/elephant/doc/reference/spike_train_surrogates.rst

@@ -1,12 +1 @@
-======================
-Spike train surrogates
-======================
-
-
-.. testsetup::
-
-   from elephant.spike_train_surrogates import shuffle_isis, randomise_spikes, jitter_spikes, dither_spikes, dither_spike_train
-
-
-.. automodule:: elephant.spike_train_surrogates
-   :members:
+/annex/objects/MD5-s278--bc3adc9949200f937d49eeff3f08706d

+ 1 - 0
code/elephant/doc/reference/spike_train_synchrony.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s241--6581d04418081afc61056f10ebdd43c7

+ 1 - 6
code/elephant/doc/reference/sta.rst

@@ -1,6 +1 @@
-=======================
-Spike-triggered average
-=======================
-
-.. automodule:: elephant.sta
-   :members:
+/annex/objects/MD5-s115--8e271c5e745eb4910886aee12a3c0d92

+ 1 - 5
code/elephant/doc/reference/statistics.rst

@@ -1,5 +1 @@
-==========================
-Statistics of spike trains
-==========================
-
-.. automodule:: elephant.statistics
+/annex/objects/MD5-s118--bc119628f8fe31386c28337e35c28abe

+ 1 - 0
code/elephant/doc/reference/toctree/kernels/elephant.kernels.AlphaKernel.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s546--1563997a97f7d13b3b104ed64f5c9612

+ 1 - 0
code/elephant/doc/reference/toctree/kernels/elephant.kernels.EpanechnikovLikeKernel.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s656--0a39e567ef299963b1eace0558b5e74d

+ 1 - 0
code/elephant/doc/reference/toctree/kernels/elephant.kernels.ExponentialKernel.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s606--6d8a488e110e81c2349e0dd5a53d63df

+ 1 - 0
code/elephant/doc/reference/toctree/kernels/elephant.kernels.GaussianKernel.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s576--c4f09b2c11979379f06d79493e0f73af

+ 1 - 0
code/elephant/doc/reference/toctree/kernels/elephant.kernels.LaplacianKernel.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s586--03a46e98341569c6c60496e2516a9a37

+ 1 - 0
code/elephant/doc/reference/toctree/kernels/elephant.kernels.RectangularKernel.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s606--7213680e4fecb342f30ab09f8a88114b

+ 1 - 0
code/elephant/doc/reference/toctree/kernels/elephant.kernels.TriangularKernel.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s596--a4e75caa26a81f172bac1d86a3a8ca28

+ 1 - 23
code/elephant/doc/reference/unitary_event_analysis.rst

@@ -1,23 +1 @@
-======================
-Unitary Event Analysis
-======================
-
-.. automodule:: elephant.unitary_event_analysis
-
-Author Contributions
---------------------
-
-- Vahid Rostami (VR)
-- Sonja Gruen (SG)
-- Markus Diesmann (MD)
-
-VR implemented the method, SG and MD provided guidance.
-
-
-References
-----------
-
-.. bibliography:: ../bib/elephant.bib
-   :labelprefix: ue
-   :keyprefix: unitary_event_analysis-
-   :style: unsrt
+/annex/objects/MD5-s421--cb6d20ada4cab45c8f3941e9e0b620bb

+ 1 - 0
code/elephant/doc/reference/waveform_features.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s111--8f6d3a09a9132e3d5a09493b67d8057b

+ 1 - 334
code/elephant/doc/release_notes.rst

@@ -1,334 +1 @@
-*************
-Release Notes
-*************
-
-Elephant 0.8.0 release notes
-============================
-
-New features
-------------
-* The `parallel` module is a new experimental module (https://github.com/NeuralEnsemble/elephant/pull/307) for running Python functions concurrently. It supports the native (pythonic) ProcessPoolExecutor and MPI, and is not limited to Elephant functions.
-* Added an optional `refractory_period` argument, set to None by default, to `dither_spikes` function (https://github.com/NeuralEnsemble/elephant/pull/297).
-* Added `cdf` and `icdf` functions in Kernel class to correctly estimate the median index, needed for `instantaneous_rate` function in statistics.py (https://github.com/NeuralEnsemble/elephant/pull/313).
-* Added an optional `center_kernel` argument, set to True by default (to behave as in Elephant <0.8.0 versions) to `instantaneous_rate` function in statistics.py (https://github.com/NeuralEnsemble/elephant/pull/313).
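-
-A minimal sketch of the new `center_kernel` argument (the spike times and the
-kernel width are arbitrary choices):
-
-.. code-block:: python
-
-    import neo
-    import quantities as pq
-    from elephant.kernels import GaussianKernel
-    from elephant.statistics import instantaneous_rate
-
-    st = neo.SpikeTrain([0.1, 0.4, 0.6, 0.8], units='s', t_stop=1.0)
-    rate = instantaneous_rate(st, sampling_period=10 * pq.ms,
-                              kernel=GaussianKernel(50 * pq.ms),
-                              center_kernel=True)  # pre-0.8.0 behaviour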
-
-New tutorials
--------------
-* Analysis of Sequences of Synchronous EvenTs (ASSET) tutorial: https://elephant.readthedocs.io/en/latest/tutorials/asset.html
-* Parallel module tutorial: https://elephant.readthedocs.io/en/latest/tutorials/parallel.html
-
-Optimization
-------------
-* Optimized ASSET runtime by a factor of 10 and more (https://github.com/NeuralEnsemble/elephant/pull/259, https://github.com/NeuralEnsemble/elephant/pull/333).
-
-Python 2.7 and 3.5 deprecation
-------------------------------
-Python 2.7 and 3.5 are deprecated and will no longer be maintained after the end of 2020. Switch to Python 3.6+.
-
-Breaking changes
-----------------
-* Naming convention changes (`binsize` -> `bin_size`, etc.) in almost all Elephant functions (https://github.com/NeuralEnsemble/elephant/pull/316).
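-
-A minimal sketch of the rename, using `BinnedSpikeTrain` as an example (the
-spike train is a toy; the old spelling appears in the comment):
-
-.. code-block:: python
-
-    import neo
-    import quantities as pq
-    from elephant.conversion import BinnedSpikeTrain
-
-    st = neo.SpikeTrain([0.1, 0.5, 0.9], units='s', t_stop=1.0)
-    # Elephant < 0.8.0 spelling: BinnedSpikeTrain(st, binsize=100 * pq.ms)
-    bst = BinnedSpikeTrain(st, bin_size=100 * pq.ms)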
-
-
-Elephant 0.7.0 release notes
-============================
-
-Breaking changes
-----------------
-* [gpfa] The GPFA dimensionality reduction method has been rewritten in an easy-to-use, scikit-learn-style class format (https://github.com/NeuralEnsemble/elephant/pull/287):
-
-.. code-block:: python
-
-    import quantities as pq
-    from elephant.gpfa import GPFA
-
-    # `spiketrains`: list of trials, each a list of neo.SpikeTrain objects
-    gpfa = GPFA(bin_size=20 * pq.ms, x_dim=8)
-    results = gpfa.fit_transform(spiketrains, returned_data=['xorth', 'xsm'])
-
-New tutorials
--------------
-* GPFA dimensionality reduction method: https://elephant.readthedocs.io/en/latest/tutorials/gpfa.html
-* Unitary Event Analysis of coordinated spiking activity: https://elephant.readthedocs.io/en/latest/tutorials/unitary_event_analysis.html
-* (Introductory) statistics module: https://elephant.readthedocs.io/en/latest/tutorials/statistics.html
-
-Deprecations
-------------
-* **Python 2.7 support will be dropped on Dec 31, 2020.** Please switch to Python 3.6, 3.7, or 3.8.
-* [spike train generation] `homogeneous_poisson_process_with_refr_period()`, introduced in v0.6.4, is deprecated and will be deleted in v0.8.0. Use `homogeneous_poisson_process(refractory_period=...)` instead.
-* [pandas bridge] pandas\_bridge module is deprecated and will be deleted in v0.8.0.
-
-New features
-------------
-* New documentation style, guidelines, tutorials, and more (https://github.com/NeuralEnsemble/elephant/pull/294).
-* Python 3.8 support (https://github.com/NeuralEnsemble/elephant/pull/282).
-* [spike train generation] Added `refractory_period` flag in `homogeneous_poisson_process()` (https://github.com/NeuralEnsemble/elephant/pull/292) and `inhomogeneous_poisson_process()` (https://github.com/NeuralEnsemble/elephant/pull/295) functions. The default is `refractory_period=None`, meaning no refractoriness; see the sketch after this list.
-* [spike train correlation] `cross_correlation_histogram()` supports different t_start and t_stop of input spiketrains.
-* [waveform features] `waveform_width()` function extracts the width (trough-to-peak TTP) of a waveform (https://github.com/NeuralEnsemble/elephant/pull/279).
-* [signal processing] Added `scaleopt` flag in `pairwise_cross_correlation()` to mimic the behavior of Matlab's `xcorr()` function (https://github.com/NeuralEnsemble/elephant/pull/277). The default is `scaleopt=unbiased` to be consistent with the previous versions of Elephant.
-* [spike train surrogates] Joint-ISI dithering method via `JointISI` class (https://github.com/NeuralEnsemble/elephant/pull/275).
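-
-A minimal sketch of the new `refractory_period` flag (rate, duration and
-period values are arbitrary choices):
-
-.. code-block:: python
-
-    import quantities as pq
-    from elephant.spike_train_generation import homogeneous_poisson_process
-
-    st = homogeneous_poisson_process(rate=10 * pq.Hz, t_stop=10 * pq.s,
-                                     refractory_period=2 * pq.ms)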
-
-Bug fixes
----------
-* [spike train correlation] Fix CCH Border Correction (https://github.com/NeuralEnsemble/elephant/pull/298). Now, the border correction in `cross_correlation_histogram()` correctly reflects the number of bins used for the calculation at each lag. The correction factor is now unity at full overlap.
-* [phase analysis] Fixed incorrect behavior of `spike_triggered_phase()` when the spike train and the analog signal had different time units (https://github.com/NeuralEnsemble/elephant/pull/270).
-
-Performance
------------
-* [spade] 7x SPADE speedup (https://github.com/NeuralEnsemble/elephant/pull/280, https://github.com/NeuralEnsemble/elephant/pull/285, https://github.com/NeuralEnsemble/elephant/pull/286). Moreover, SPADE is now able to handle all surrogate types that are available in Elephant, as well as more types of statistical corrections.
-* [conversion] Fast & memory-efficient `covariance()` and Pearson `corrcoef()` (https://github.com/NeuralEnsemble/elephant/pull/274). Added flag `fast=True` by default in both functions; see the sketch after this list.
-* [conversion] Use fast fftconvolve instead of np.correlate in `cross_correlation_histogram()` (https://github.com/NeuralEnsemble/elephant/pull/273).
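-
-A minimal sketch of the `fast` flag mentioned above (toy spike trains;
-keyword spellings follow the post-0.8.0 convention):
-
-.. code-block:: python
-
-    import neo
-    import quantities as pq
-    from elephant.conversion import BinnedSpikeTrain
-    from elephant.spike_train_correlation import corrcoef
-
-    sts = [neo.SpikeTrain([0.1, 0.5, 0.9], units='s', t_stop=1.0),
-           neo.SpikeTrain([0.2, 0.5, 0.8], units='s', t_stop=1.0)]
-    cc_matrix = corrcoef(BinnedSpikeTrain(sts, bin_size=100 * pq.ms),
-                         fast=True)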
-
-
-Elephant 0.6.4 release notes
-============================
-
-This release has been made for the "1st Elephant User Workshop" (https://www.humanbrainproject.eu/en/education/participatecollaborate/infrastructure-events-trainings/1st-elephant-user-workshop-accelerate-structured-and-reproducibl).
-
-
-Main features
--------------
-* neo v0.8.0 compatible
-
-
-New modules
------------
-* GPFA - Gaussian-process factor analysis - dimensionality reduction method for neural trajectory visualization (https://github.com/NeuralEnsemble/elephant/pull/233). _Note: the API could change in the future._
-
-
-Bug fixes
----------
-* [signal processing] Keep `array_annotations` in the output of signal processing functions (https://github.com/NeuralEnsemble/elephant/pull/258).
-* [SPADE] Fixed the calculation of the duration of a pattern in the output (https://github.com/NeuralEnsemble/elephant/pull/254).
-* [statistics] Fixed a bug where automatic kernel selection yielded incorrect values (https://github.com/NeuralEnsemble/elephant/pull/246).
-
-
-Improvements
-------------
-* Vectorized `spike_time_tiling_coefficient()` function - got rid of a double for-loop (https://github.com/NeuralEnsemble/elephant/pull/244)
-* Reduced the number of warnings during the tests (https://github.com/NeuralEnsemble/elephant/pull/238).
-* Removed unused debug code in `spade/fast_fca.py` (https://github.com/NeuralEnsemble/elephant/pull/249).
-* Improved doc string of `covariance()` and `corrcoef()` (https://github.com/NeuralEnsemble/elephant/pull/260).
-
-
-
-Elephant 0.6.3 release notes
-============================
-July 22nd 2019
-
-Release v0.6.3 focuses mostly on maintenance improvements.
-
-New functions
--------------
-* `waveform_features` module
-    * Waveform signal-to-noise ratio (https://github.com/NeuralEnsemble/elephant/pull/219).
-* Added support for Butterworth `sosfiltfilt` - numerically stable (in particular, higher order) filtering (https://github.com/NeuralEnsemble/elephant/pull/234).
-
-Bug fixes
----------
-* Fixed neo version typo in requirements file (https://github.com/NeuralEnsemble/elephant/pull/218)
-* Fixed broken docs (https://github.com/NeuralEnsemble/elephant/pull/230, https://github.com/NeuralEnsemble/elephant/pull/232)
-* Fixed issue with 32-bit arch (https://github.com/NeuralEnsemble/elephant/pull/229)
-
-Other changes
--------------
-* Added issue templates (https://github.com/NeuralEnsemble/elephant/pull/226)
-* Single VERSION file (https://github.com/NeuralEnsemble/elephant/pull/231)
-
-Elephant 0.6.2 release notes
-============================
-April 23rd 2019
-
-New functions
--------------
-* `signal_processing` module
-    * New functions to calculate the area under a time series and the derivative of a time series.
-
-Other changes
--------------
-* Added support for initializing binned spike train representations from a matrix
-* Multiple bug fixes
-
-
-Elephant 0.6.1 release notes
-============================
-April 1st 2019
-
-New functions
--------------
-* `signal_processing` module
-    * New function to calculate the cross-correlation function for analog signals.
-* `spade` module
-    * Spatio-temporal spike pattern detection now includes the option to assess significance also based on time-lags of patterns, in addition to pattern size and frequency (referred to as 3D pattern spectrum).
-
-Other changes
--------------
-* This release fixes a number of compatibility issues in relation to API breaking changes in the Neo library.
-* Fixed error in STTC calculation (spike time tiling coefficient)
-* Minor bug fixes
-
-
-Elephant 0.6.0 release notes
-============================
-October 12th 2018
-
-New functions
--------------
-* `cell_assembly_detection` module
-    * New function to detect higher-order correlation structures such as patterns in parallel spike trains, based on Russo et al., 2017.
-* **wavelet_transform()** function in the `signal_processing.py` module
-    * Function for computing the wavelet transform of a given time series, based on Le van Quyen et al. (2001)
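-
-A minimal sketch of the new function (the all-zero signal is a placeholder;
-the `freq` argument name is an assumption based on the current signature):
-
-.. code-block:: python
-
-    import neo
-    import quantities as pq
-    from elephant.signal_processing import wavelet_transform
-
-    signal = neo.AnalogSignal([[0.0]] * 1000, units='mV',
-                              sampling_rate=1 * pq.kHz)
-    wt = wavelet_transform(signal, freq=30.0)  # 30 Hz Morlet-like wavelet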
-
-Other changes
--------------
-* Switched to multiple `requirements.txt` files which are directly read into the `setup.py`
-* `instantaneous_rate()` now accepts a list of spike trains
-* Minor bug fixes
-
-
-Elephant 0.5.0 release notes
-============================
-April 4th 2018
-
-New functions
--------------
-* `change_point_detection` module:
-    * New function to detect changes in the firing rate
-* `spike_train_correlation` module:
-    * New function to calculate the spike time tiling coefficient (see the sketch after this list)
-* `phase_analysis` module:
-    * New function to extract spike-triggered phases of an AnalogSignal
-* `unitary_event_analysis` module:
-    * Added a new unit test to the UE function to verify the method, based on data from a recent [Re]Science publication
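-
-A minimal sketch of the spike time tiling coefficient referenced above
-(spike times and the `dt` value are arbitrary choices):
-
-.. code-block:: python
-
-    import neo
-    import quantities as pq
-    from elephant.spike_train_correlation import spike_time_tiling_coefficient
-
-    st1 = neo.SpikeTrain([0.1, 0.5, 0.9], units='s', t_stop=1.0)
-    st2 = neo.SpikeTrain([0.12, 0.45, 0.95], units='s', t_stop=1.0)
-    sttc = spike_time_tiling_coefficient(st1, st2, dt=0.05 * pq.s)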
-
-Other changes
--------------
-* Minor bug fixes
-
-
-Elephant 0.4.3 release notes
-============================
-March 2nd 2018
-
-Other changes
--------------
-* Bug fixes in `spade` module:
-    * Fixed an incompatibility with the latest version of an external library
-
-
-Elephant 0.4.2 release notes
-============================
-March 1st 2018
-
-New functions
--------------
-* `spike_train_generation` module:
-    * **inhomogeneous_poisson()** function
-* Modules for Spatio Temporal Pattern Detection (SPADE) `spade_src`:
-    * Module SPADE: `spade.py`
-* Module `statistics.py`:
-    * Added CV2 (coefficient of variation for non-stationary time series)
-* Module `spike_train_correlation.py`:
-    * Added normalization in **cross_correlation_histogram()** (CCH)
-
-Other changes
--------------
-* Adapted the `setup.py` to automatically install the spade modules including the compiled `C` files `fim.so`
-* Included testing environment for MPI in `travis.yml`
-* Changed function arguments in `current_source_density.py` to `neo.AnalogSignal` instead of a list of `neo.AnalogSignal` objects
-* Fixes to travis and setup configuration files
-* Fixed bug in ISI function `isi()`, `statistics.py` module
-* Fixed bug in `dither_spikes()`, `spike_train_surrogates.py`
-* Minor bug fixes
-
-
-Elephant 0.4.1 release notes
-============================
-March 23rd 2017
-
-Other changes
--------------
-* Fix in `setup.py` to correctly import the current source density module
-
-
-Elephant 0.4.0 release notes
-============================
-March 22nd 2017
-
-New functions
--------------
-* `spike_train_generation` module:
-    * peak detection: **peak_detection()**
-* Modules for Current Source Density: `current_source_density_src`
-    * Module Current Source Density: `KCSD.py`
-    * Module for Inverse Current Source Density: `icsd.py`
-
-API changes
------------
-* Interoperability between Neo 0.5.0 and Elephant
-    * Elephant has adapted its functions to the changes in Neo 0.5.0,
-      most of the functionality behaves as before
-    * See Neo documentation for recent changes: http://neo.readthedocs.io/en/latest/whatisnew.html
-
-Other changes
--------------
-* Fixes to travis and setup configuration files.
-* Minor bug fixes.
-* Added module `six` for Python 2.7 backwards compatibility
-
-
-Elephant 0.3.0 release notes
-============================
-April 12th 2016
-
-New functions
--------------
-* `spike_train_correlation` module:
-    * cross correlation histogram: **cross_correlation_histogram()**
-* `spike_train_generation` module:
-    * single interaction process (SIP): **single_interaction_process()**
-    * compound Poisson process (CPP): **compound_poisson_process()**
-* `signal_processing` module:
-    * analytic signal: **hilbert()**
-* `sta` module:
-    * spike field coherence: **spike_field_coherence()**
-* Module to represent kernels: `kernels` module
-* Spike train metrics / dissimilarity / synchrony measures: `spike_train_dissimilarity` module
-* Unitary Event (UE) analysis: `unitary_event_analysis` module
-* Analysis of Sequences of Synchronous EvenTs (ASSET): `asset` module
-
-API changes
------------
-* Function **instantaneous_rate()** now uses kernels as objects defined in the `kernels` module. The previous implementation of the function using the `make_kernel()` function is deprecated, but still temporarily available as `oldfct_instantaneous_rate()`.
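-
-A minimal sketch of the kernel-object API (the sigma value is arbitrary):
-
-.. code-block:: python
-
-    import quantities as pq
-    from elephant.kernels import GaussianKernel
-
-    # passed to instantaneous_rate(..., kernel=kernel)
-    kernel = GaussianKernel(sigma=10 * pq.ms)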
-
-Other changes
--------------
-* Fixes to travis and readthedocs configuration files.
-
-
-Elephant 0.2.1 release notes
-============================
-February 18th 2016
-
-Other changes
--------------
-Minor bug fixes.
-
-
-Elephant 0.2.0 release notes
-============================
-September 22nd 2015
-
-New functions
--------------
-* Added covariance function **covariance()** in the `spike_train_correlation` module
-* Added complexity pdf **complexity_pdf()** in the `statistics` module
-* Added spike train extraction from analog signals via threshold detection in the `spike_train_generation` module
-* Added **coherence()** function for analog signals in the `spectral` module
-* Added **Cumulant Based Inference of higher-order Correlation (CuBIC)** in the `cubic` module for correlation analysis of parallel recorded spike trains
-
-API changes
------------
-* **Optimized kernel bandwidth** in the `rate_estimation` function: calculates the optimized kernel width when the kernel-width parameter is specified as `auto`
-
-Other changes
--------------
-* **Optimized creation of sparse matrices**: The creation speed of the sparse matrix inside the `BinnedSpikeTrain` class is optimized
-* Added **Izhikevich neuron simulator** in the `make_spike_extraction_test_data` module
-* Minor improvements to the test and continuous integration infrastructure
+/annex/objects/MD5-s22657--4d7f8371dca1bb747283f7b205e9fee6

+ 1 - 0
code/elephant/doc/style_guide.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s29332--dd8912d63c59e90212c433fe6ee3c28b

+ 1 - 0
code/elephant/doc/tutorials.rst

@@ -0,0 +1 @@
+/annex/objects/MD5-s2256--9af9607f4e68c6574fda5aaf9d48057e

+ 1 - 0
code/elephant/doc/tutorials/asset.ipynb

@@ -0,0 +1 @@
+/annex/objects/MD5-s334296--893412a7f36e61d3f13140a63ec5ec32

+ 1 - 0
code/elephant/doc/tutorials/asset_showcase_500.nix

@@ -0,0 +1 @@
+/annex/objects/MD5-s44142233--516ad7e890890253fe20c2602679a9d5

+ 1 - 0
code/elephant/doc/tutorials/gpfa.ipynb

@@ -0,0 +1 @@
+/annex/objects/MD5-s1578043--ead1eaa613b19c816fd610c59b19b5b6

+ 1 - 0
code/elephant/doc/tutorials/parallel.ipynb

@@ -0,0 +1 @@
+/annex/objects/MD5-s13125--733cf87061223d4bd1cea852179d6067

+ 1 - 0
code/elephant/doc/tutorials/statistics.ipynb

@@ -0,0 +1 @@
+/annex/objects/MD5-s19838--121780ca78da46fb7041e9185583bdb4

+ 1 - 0
code/elephant/doc/tutorials/unitary_event_analysis.ipynb

@@ -0,0 +1 @@
+/annex/objects/MD5-s15368--6237d07e0ac46dfa81410e432db5592c

+ 1 - 0
code/elephant/elephant/VERSION

@@ -0,0 +1 @@
+/annex/objects/MD5-s5--778cf622c50bfcd4ed0298c65159386a

+ 1 - 50
code/elephant/elephant/__init__.py

@@ -1,50 +1 @@
-# -*- coding: utf-8 -*-
-"""
-Elephant is a package for the analysis of neurophysiology data, based on Neo.
-
-:copyright: Copyright 2014-2019 by the Elephant team, see `doc/authors.rst`.
-:license: Modified BSD, see LICENSE.txt for details.
-"""
-
-from . import (statistics,
-               spike_train_generation,
-               spike_train_correlation,
-               unitary_event_analysis,
-               cubic,
-               spectral,
-               kernels,
-               spike_train_dissimilarity,
-               spike_train_surrogates,
-               signal_processing,
-               current_source_density,
-               change_point_detection,
-               phase_analysis,
-               sta,
-               conversion,
-               neo_tools,
-               cell_assembly_detection,
-               spade,
-               waveform_features,
-               gpfa)
-
-# modules not included on purpose:
-#   parallel: avoids warnings when elephant is imported
-
-try:
-    from . import asset
-    from . import spade
-except ImportError:
-    # requirements-extras are missing
-    # please install Elephant with `pip install elephant[extras]`
-    pass
-
-
-def _get_version():
-    import os
-    elephant_dir = os.path.dirname(__file__)
-    with open(os.path.join(elephant_dir, 'VERSION')) as version_file:
-        version = version_file.read().strip()
-    return version
-
-
-__version__ = _get_version()
+/annex/objects/MD5-s1394--567d96d6324a475c7fb7da4ce2aca6d3

File diff suppressed because it is too large
+ 1 - 1936
code/elephant/elephant/asset.py


+ 1 - 0
code/elephant/elephant/causality/__init__.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s0--d41d8cd98f00b204e9800998ecf8427e

+ 1 - 0
code/elephant/elephant/causality/granger.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s19241--8670cea75564edf66bd899320932d383

File diff suppressed because it is too large
+ 1 - 1210
code/elephant/elephant/cell_assembly_detection.py


+ 1 - 503
code/elephant/elephant/change_point_detection.py

@@ -1,503 +1 @@
-# -*- coding: utf-8 -*-
-
-"""
-This algorithm determines whether a spike train `spk` can be considered a
-stationary process (constant firing rate) or a non-stationary process (i.e.
-one or more points at which the rate increases or decreases). In case of
-non-stationarity, the output is a list of detected Change Points (CPs).
-Essentially, a set of two-sided windows of width `h` (`_filter(t, h, spk)`)
-slides over the spike train within the time `[h, t_final-h]`. This generates a
-`_filter_process(time_step, h, spk)` that assigns to each time `t` the
-difference between the spike counts in the right and the left window. If at
-any time `t` this difference is large enough, the presence of a rate Change
-Point in a neighborhood of `t` is assumed. A threshold `test_quantile` for the
-maximum of the filter_process (max difference of spike count between the left
-and right window) is derived based on asymptotic considerations. The procedure
-is repeated for an arbitrary set of windows, with different sizes `h`.
-
-Examples
---------
-The following applies `multiple_filter_test` to a spike train.
-
-    >>> import quantities as pq
-    >>> import neo
-    >>> from elephant.change_point_detection import multiple_filter_test
-    ...
-    >>> test_array = [1.1,1.2,1.4,   1.6,1.7,1.75,1.8,1.85,1.9,1.95]
-    >>> st = neo.SpikeTrain(test_array, units='s', t_stop = 2.1)
-    >>> window_size = [0.5]*pq.s
-    >>> t_fin = 2.1*pq.s
-    >>> alpha = 5.0
-    >>> num_surrogates = 10000
-    >>> change_points = multiple_filter_test(window_size, st, t_fin, alpha,
-    ...                 num_surrogates, time_step = 0.5*pq.s)
-
-References
-----------
-Messer, M., Kirchner, M., Schiemann, J., Roeper, J., Neininger, R., &
-Schneider, G. (2014). A multiple filter test for the detection of rate changes
-in renewal processes with varying variance. The Annals of Applied Statistics,
-8(4),2027-2067.
-
-Original code
--------------
-Adapted from the published R implementation:
-DOI: 10.1214/14-AOAS782SUPP;.r
-
-"""
-
-from __future__ import division, print_function, unicode_literals
-
-import numpy as np
-import quantities as pq
-
-from elephant.utils import deprecated_alias
-
-__all__ = [
-    "multiple_filter_test",
-    "empirical_parameters"
-]
-
-
-@deprecated_alias(dt='time_step')
-def multiple_filter_test(window_sizes, spiketrain, t_final, alpha,
-                         n_surrogates, test_quantile=None, test_param=None,
-                         time_step=None):
-    """
-    Detects change points.
-
-    This function returns the detected change points, that correspond to the
-    maxima of the `_filter_processes`. These are the processes generated by
-    sliding the windows of step `time_step`; at each step the difference
-    between spike on the right and left window is calculated.
-
-    Parameters
-    ----------
-    window_sizes : list of quantity objects
-        list of window sizes
-    spiketrain : neo.SpikeTrain, numpy array or list
-        spiketrain objects to analyze
-    t_final : quantity
-        final time of the spike train which is to be analysed
-    alpha : float
-        alpha-quantile in range [0, 100] for the set of maxima of the limit
-        processes
-    n_surrogates : integer
-        number of simulated limit processes
-    test_quantile : float
-        threshold for the maxima of the filter derivative processes; if any
-        of these maxima is larger than this value, the presence of a CP at
-        the time corresponding to that maximum is assumed
-    time_step : quantity
-        resolution; time step at which the windows are slid
-    test_param : np.array of shape (3, number of windows)
-        first row: list of `h`; second and third rows: empirical means and
-        variances of the limit process corresponding to `h`. This will be
-        used to normalize the `filter_process` in order to give every
-        maximum the same impact on the global statistic.
-
-    Returns
-    -------
-    cps : list of list
-       one list for each window size `h`, containing the points detected with
-       the corresponding `filter_process`. N.B.: only cps whose h-neighborhood
-       does not include previously detected cps (with smaller window h) are
-       added to the list.
-    """
-
-    if (test_quantile is None) and (test_param is None):
-        test_quantile, test_param = empirical_parameters(window_sizes, t_final,
-                                                         alpha, n_surrogates,
-                                                         time_step)
-    elif test_quantile is None:
-        test_quantile = empirical_parameters(window_sizes, t_final, alpha,
-                                             n_surrogates, time_step)[0]
-    elif test_param is None:
-        test_param = empirical_parameters(window_sizes, t_final, alpha,
-                                          n_surrogates, time_step)[1]
-
-    spk = spiketrain
-
-    #  List of lists of detected change points (CPs), to be returned
-    cps = []
-
-    for i, h in enumerate(window_sizes):
-        # automatic setting of time_step
-        dt_temp = h / 20 if time_step is None else time_step
-        # filter_process for window of size h
-        t, differences = _filter_process(dt_temp, h, spk, t_final, test_param)
-        time_index = np.arange(len(differences))
-        # Point detected with window h
-        cps_window = []
-        while np.max(differences) > test_quantile:
-            cp_index = np.argmax(differences)
-            # from index to time
-            cp = cp_index * dt_temp + h
-            # before repeating the procedure, the h-neighbourhood of the
-            # detected CP is discarded, because rate changes within it are
-            # already explained
-            mask_fore = time_index > cp_index - int((h / dt_temp).simplified)
-            mask_back = time_index < cp_index + int((h / dt_temp).simplified)
-            differences[mask_fore & mask_back] = 0
-            # check if the neighbourhood of detected cp does not contain cps
-            # detected with other windows
-            neighbourhood_free = True
-            # iterate on lists of cps detected with smaller window
-            for j in range(i):
-                # iterate on CPs detected with the j-th smallest window
-                for c_pre in cps[j]:
-                    if c_pre - h < cp < c_pre + h:
-                        neighbourhood_free = False
-                        break
-            # if none of the previously detected CPs falls in the h-
-            # neighbourhood
-            if neighbourhood_free:
-                # add the current CP to the list
-                cps_window.append(cp)
-        # add the present list to the grand list
-        cps.append(cps_window)
-
-    return cps
-
-
-def _brownian_motion(t_in, t_fin, x_in, time_step):
-    """
-    Generate a Brownian Motion.
-
-    Parameters
-    ----------
-    t_in : quantities,
-        initial time
-    t_fin : quantities,
-         final time
-    x_in : float
-        initial point of the process: _brownian_motion(t_in) = x_in
-    time_step : quantities,
-      resolution, time step at which brownian increments are summed
-    Returns
-    -------
-    Brownian motion on [t_in, t_fin], with resolution time_step and initial
-    state x_in
-    """
-
-    u = 1 * pq.s
-    try:
-        t_in_sec = t_in.rescale(u).magnitude
-    except ValueError:
-        raise ValueError("t_in must be a time quantity")
-    try:
-        t_fin_sec = t_fin.rescale(u).magnitude
-    except ValueError:
-        raise ValueError("t_fin must be a time quantity")
-    try:
-        dt_sec = time_step.rescale(u).magnitude
-    except ValueError:
-        raise ValueError("dt must be a time quantity")
-
-    x = np.random.normal(0, np.sqrt(dt_sec),
-                         size=int((t_fin_sec - t_in_sec) / dt_sec))
-    s = np.cumsum(x)
-    return s + x_in
-
-
-def _limit_processes(window_sizes, t_final, time_step):
-    """
-    Generate the limit processes (depending only on t_final and h), one for
-    each window size `h` in H. The distribution of maxima of these processes
-    is used to derive threshold `test_quantile` and parameters `test_param`.
-
-    Parameters
-    ----------
-        window_sizes : list of quantities
-            set of window sizes
-        t_final : quantity object
-            end of limit process
-        time_step : quantity object
-            resolution; time step at which the windows are slid
-
-    Returns
-    -------
-        limit_processes : list of numpy arrays
-            each entry contains the limit process for the corresponding h,
-            evaluated on [h, T-h] in steps of time_step
-    """
-
-    limit_processes = []
-
-    u = 1 * pq.s
-    try:
-        window_sizes_sec = window_sizes.rescale(u).magnitude
-    except ValueError:
-        raise ValueError("window_sizes must be a list of times")
-    try:
-        dt_sec = time_step.rescale(u).magnitude
-    except ValueError:
-        raise ValueError("time_step must be a time quantity")
-
-    w = _brownian_motion(0 * u, t_final, 0, time_step)
-
-    for h in window_sizes_sec:
-        # BM on [h,T-h], shifted in time t-->t+h
-        brownian_right = w[int(2 * h / dt_sec):]
-        # BM on [h,T-h], shifted in time t-->t-h
-        brownian_left = w[:int(-2 * h / dt_sec)]
-        # BM on [h,T-h]
-        brownian_center = w[int(h / dt_sec):int(-h / dt_sec)]
-
-        modul = np.abs(brownian_right + brownian_left - 2 * brownian_center)
-        limit_process_h = modul / (np.sqrt(2 * h))
-        limit_processes.append(limit_process_h)
-
-    return limit_processes
-
-
-@deprecated_alias(dt='time_step')
-def empirical_parameters(window_sizes, t_final, alpha, n_surrogates,
-                         time_step=None):
-    """
-    This function generates the threshold and the null parameters.
-    The `_filter_process_h` has been proved to converge (for t_final,
-    h --> infinity) to a continuous functional of a Brownian motion
-    ('limit_process'). Using a Monte Carlo technique, maxima of
-    these limit_processes are collected.
-
-    The threshold is defined as the alpha quantile of this set of maxima.
-    Namely:
-    test_quantile := alpha quantile of {max_(h in window_size)[
-                                 max_(t in [h, t_final-h])_limit_process_h(t)]}
-
-    Parameters
-    ----------
-    window_sizes : list of quantity objects
-        set of window sizes
-    t_final : quantity object
-        final time of the spike train
-    alpha : float
-        alpha-quantile in range [0, 100]
-    n_surrogates : integer
-        number of simulated limit processes
-    time_step : quantity object
-        resolution; time step at which the windows are slid
-
-    Returns
-    -------
-    test_quantile : float
-        threshold for the maxima of the filter derivative processes; if any
-        of these maxima is larger than this value, the presence of a CP at
-        the time corresponding to that maximum is assumed
-
-    test_param : np.array of shape (3, number of windows)
-        first row: list of `h`; second and third rows: empirical means and
-        variances of the limit process corresponding to `h`. This will be
-        used to normalize the `filter_process` in order to give every
-        maximum the same impact on the global statistic.
-    """
-
-    if not isinstance(window_sizes, pq.Quantity):
-        raise ValueError("window_sizes must be a list of time quantities")
-    if not isinstance(t_final, pq.Quantity):
-        raise ValueError("t_final must be a time quantity")
-    if not isinstance(n_surrogates, int):
-        raise TypeError("n_surrogates must be an integer")
-    if not (isinstance(time_step, pq.Quantity) or (time_step is None)):
-        raise ValueError("time_step must be a time quantity")
-
-    if t_final <= 0:
-        raise ValueError("t_final needs to be strictly positive")
-    if alpha * (100.0 - alpha) < 0:
-        raise ValueError("alpha needs to be in (0,100)")
-    if np.min(window_sizes) <= 0:
-        raise ValueError("window size needs to be strictly positive")
-    if np.max(window_sizes) >= t_final / 2:
-        raise ValueError("window size too large")
-    if time_step is not None:
-        for h in window_sizes:
-            if int(h.rescale('us')) % int(time_step.rescale('us')) != 0:
-                raise ValueError(
-                    "Every window size h must be a multiple of time_step")
-
-    # Generate a matrix M*: n X m where n = n_surrogates is the number of
-    # simulated limit processes and m is the number of chosen window sizes.
-    # Elements are: M*(i,h) = max(t in T)[`limit_process_h`(t)],
-    # for each h in H and surrogate i
-    maxima_matrix = []
-
-    for i in range(n_surrogates):
-        simu = _limit_processes(window_sizes, t_final, time_step)
-        # max over time of the limit process generated with window h
-        mh_star = [np.max(x) for x in simu]
-        maxima_matrix.append(mh_star)
-
-    maxima_matrix = np.asanyarray(maxima_matrix)
-
-    # these parameters will be used to normalize both the limit_processes (H0)
-    # and the filter_processes
-    null_mean = maxima_matrix.mean(axis=0)
-    null_var = maxima_matrix.var(axis=0)
-
-    # matrix normalization by mean and variance of the limit process, in order
-    # to give, for every h, the same impact on the global maximum
-    matrix_normalized = (maxima_matrix - null_mean) / np.sqrt(null_var)
-
-    great_maxs = np.max(matrix_normalized, axis=1)
-    test_quantile = np.percentile(great_maxs, 100.0 - alpha)
-    null_parameters = [window_sizes, null_mean, null_var]
-    test_param = np.asanyarray(null_parameters)
-
-    return test_quantile, test_param
-
-
-def _filter(t_center, window, spiketrain):
-    """
-    This function calculates the difference of spike counts in the left and
-    right half of a window of size h centered at t, normalized by its
-    variance. The variance of this count can be expressed as a combination of
-    the mean and variance of the I.S.I.s lying inside the window.
-
-    Parameters
-    ----------
-    t_center : quantity
-        time on which the window is centered
-    window : quantity
-        window's size
-    spiketrain : list, numpy array or SpikeTrain
-        spike train to analyze
-
-    Returns
-    -------
-    difference : float,
-        difference of spike count normalized by its variance
-    """
-
-    u = 1 * pq.s
-    try:
-        t_sec = t_center.rescale(u).magnitude
-    except AttributeError:
-        raise ValueError("t_center must be a time quantity")
-    try:
-        h_sec = window.rescale(u).magnitude
-    except AttributeError:
-        raise ValueError("window must be a time quantity")
-    try:
-        spk_sec = spiketrain.rescale(u).magnitude
-    except AttributeError:
-        raise ValueError(
-            "spiketrain must be a list (array) of times or a neo spiketrain")
-
-    # cut spike-train on the right
-    train_right = spk_sec[(t_sec < spk_sec) & (spk_sec < t_sec + h_sec)]
-    # cut spike-train on the left
-    train_left = spk_sec[(t_sec - h_sec < spk_sec) & (spk_sec < t_sec)]
-    # spike count in the right side
-    count_right = train_right.size
-    # spike count in the left side
-    count_left = train_left.size
-    # from spikes to I.S.I.s
-    isi_right = np.diff(train_right)
-    isi_left = np.diff(train_left)
-
-    if isi_right.size == 0:
-        mu_ri = 0
-        sigma_ri = 0
-    else:
-        # mean of I.S.I inside the window
-        mu_ri = np.mean(isi_right)
-        # var of I.S.I inside the window
-        sigma_ri = np.var(isi_right)
-
-    if isi_left.size == 0:
-        mu_le = 0
-        sigma_le = 0
-    else:
-        mu_le = np.mean(isi_left)
-        sigma_le = np.var(isi_left)
-
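-    # variance of the spike-count difference, expressed through the mean and
-    # variance of the ISIs in each half-window (asymptotic renewal estimate,
-    # cf. Messer et al. 2014)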
-    if (sigma_le > 0) & (sigma_ri > 0):
-        s_quad = (sigma_ri / mu_ri**3) * h_sec + (sigma_le / mu_le**3) * h_sec
-    else:
-        s_quad = 0
-
-    if s_quad == 0:
-        difference = 0
-    else:
-        difference = (count_right - count_left) / np.sqrt(s_quad)
-
-    return difference
-
-
-def _filter_process(time_step, h, spk, t_final, test_param):
-    """
-    Given a spike train `spk` and a window size `h`, this function generates
-    the `filter derivative process` by evaluating the function `_filter`
-    in steps of `time_step`.
-
-    Parameters
-    ----------
-        h : quantity object
-            window's size
-        t_final : quantity
-            final time of the spike train to be analysed
-        spk : list, array or SpikeTrain
-            spike train to analyze
-        time_step : quantity object
-            resolution; time step at which the windows are slid
-        test_param : np.array of shape (3, number of windows)
-            first row: the window sizes `h`; second and third rows: the
-            empirical means and variances of the limit processes `L_h`,
-            used to normalize the filter process
-
-    Returns
-    -------
-        time_domain : numpy array
-                   time domain of the `filter derivative process`
-        filter_process : array,
-                      values of the `filter derivative process`
-    """
-
-    u = 1 * pq.s
-
-    try:
-        h_sec = h.rescale(u).magnitude
-    except AttributeError:
-        raise ValueError("h must be a time quantity")
-    try:
-        t_final_sec = t_final.rescale(u).magnitude
-    except AttributeError:
-        raise ValueError("t_final must be a time quanity")
-    try:
-        dt_sec = time_step.rescale(u).magnitude
-    except AttributeError:
-        raise ValueError("time_step must be a time quantity")
-    # domain of the process
-    time_domain = np.arange(h_sec, t_final_sec - h_sec, dt_sec)
-    filter_trajectory = []
-    # taken from the function used to generate the threshold
-    emp_mean_h = test_param[1][test_param[0] == h]
-    emp_var_h = test_param[2][test_param[0] == h]
-
-    for t in time_domain:
-        filter_trajectory.append(_filter(t * u, h, spk))
-
-    filter_trajectory = np.asanyarray(filter_trajectory)
-    # ordered normalization to give each process the same impact on the max
-    filter_process = (
-        np.abs(filter_trajectory) - emp_mean_h) / np.sqrt(emp_var_h)
-
-    return time_domain, filter_process
+/annex/objects/MD5-s18575--c4e2d371e6c4c6c842bc0fba0021063e

File diff suppressed because it is too large
+ 1 - 1073
code/elephant/elephant/conversion.py


+ 1 - 223
code/elephant/elephant/cubic.py

@@ -1,223 +1 @@
-# -*- coding: utf-8 -*-
-'''
-CuBIC is a statistical method for the detection of higher-order
-correlations in parallel spike trains, based on the analysis of the
-cumulants of the population count.
-Given a list sts of SpikeTrains, the analysis comprises the following
-steps:
-
-1) compute the population histogram (PSTH) with the desired bin size
-       >>> bin_size = 5 * pq.ms
-       >>> pop_count = elephant.statistics.time_histogram(sts, bin_size)
-
-2) apply CuBIC to the population count
-       >>> alpha = 0.05  # significance level of the tests used
-       >>> xi, p_vals, kappa, test_aborted = cubic(pop_count,
-       ...     max_iterations=100, alpha=alpha)
-
-:copyright: Copyright 2016 by the Elephant team, see `doc/authors.rst`.
-:license: BSD, see LICENSE.txt for details.
-'''
-
-from __future__ import division, print_function, unicode_literals
-
-import scipy.stats
-import scipy.special
-import math
-import warnings
-
-from elephant.utils import deprecated_alias
-
-__all__ = [
-    "cubic"
-]
-
-
-# Based on matlab code by Benjamin Staude
-# Adaptation to python by Pietro Quaglio and Emiliano Torre
-
-
-@deprecated_alias(data='histogram', ximax='max_iterations')
-def cubic(histogram, max_iterations=100, alpha=0.05):
-    r"""
-    Performs the CuBIC analysis [1]_ on a population histogram, calculated
-    from a population of spiking neurons.
-
-    The null hypothesis :math:`H_0: k_3(data)<=k^*_{3,\xi}` is iteratively
-    tested with increasing correlation order :math:`\xi` until it is possible
-    to accept, with a significance level `alpha`, that :math:`\hat{\xi}` is
-    the minimum order of correlation necessary to explain the third cumulant
-    :math:`k_3(data)`.
-
-    :math:`k^*_{3,\xi}` is the maximized third cumulant, supposing a Compound
-    Poisson Process (CPP) model for correlated spike trains (see [1]_)
-    with maximum order of correlation equal to :math:`\xi`.
-
-    Parameters
-    ----------
-    histogram : neo.AnalogSignal
-        The population histogram (count of spikes per time bin) of the entire
-        population of neurons.
-    max_iterations : int, optional
-         The maximum number of iterations of the hypothesis test. Corresponds
-         to the :math:`\hat{\xi_{\text{max}}}` in [1]_. If it is not possible
-         to compute the :math:`\hat{\xi}` within `max_iterations` iterations,
-         the CuBIC procedure is aborted.
-         Default: 100.
-    alpha : float, optional
-         The significance level of the hypothesis tests performed.
-         Default: 0.05.
-
-    Returns
-    -------
-    xi_hat : int
-        The minimum correlation order estimated by CuBIC, necessary to
-        explain the value of the third cumulant calculated from the population.
-    p : list
-        The ordered list of all the p-values of the hypothesis tests that have
-        been performed. If the maximum number of iteration `max_iterations` is
-        reached, the last p-value is set to -4.
-    kappa : list
-        The list of the first three cumulants of the data.
-    test_aborted : bool
-        Whether the test was aborted because it reached the maximum number
-        of iterations, `max_iterations`.
-
-    References
-    ----------
-    .. [1] Staude, Rotter, Gruen, (2009) J. Comp. Neurosci
-
-    """
-    # alpha must be in the interval [0, 1]
-    if alpha < 0 or alpha > 1:
-        raise ValueError(
-            'the significance level alpha (= %s) has to be in [0,1]' % alpha)
-
-    if not isinstance(max_iterations, int) or max_iterations < 0:
-        raise ValueError("'max_iterations' ({}) has to be a positive integer"
-                         .format(max_iterations))
-
-    # strip the units if a quantity or neo.AnalogSignal was passed
-    try:
-        histogram = histogram.magnitude
-    except AttributeError:
-        pass
-    L = len(histogram)
-
-    # compute first three cumulants
-    kappa = _kstat(histogram)
-    xi_hat = 1
-    xi = 1
-    pval = 0.
-    p = []
-    test_aborted = False
-
-    # compute xi_hat iteratively
-    while pval < alpha:
-        xi_hat = xi
-        if xi > max_iterations:
-            warnings.warn('Test aborted, xihat= %i > ximax= %i' % (
-                xi, max_iterations))
-            test_aborted = True
-            break
-
-        # compute p-value
-        pval = _H03xi(kappa, xi, L)
-        p.append(pval)
-        xi = xi + 1
-
-    return xi_hat, p, kappa, test_aborted
-
-
-def _H03xi(kappa, xi, L):
-    '''
-    Computes the p-value for testing the :math:`H_0: k_3(data)<=k^*_{3,\\xi}`
-    hypothesis of CuBIC in the stationary rate version
-
-    Parameters
-    ----------
-    kappa : list
-        The first three cumulants of the population of spike trains
-    xi : int
-        The maximum order of correlation :math:`\\xi` supposed in the
-        hypothesis for which the p-value of :math:`H_0` is computed
-    L : float
-        The length of the original population histogram on which the CuBIC
-        analysis is performed
-
-    Returns
-    -------
-    p : float
-        The p-value of the hypothesis tests
-    '''
-
-    # Check the order condition of the cumulants necessary to perform CuBIC
-    if kappa[1] < kappa[0]:
-        raise ValueError(
-            'H_0 cannot be tested: '
-            'kappa(2) = %f < %f = kappa(1)!!!' % (kappa[1], kappa[0]))
-    else:
-        # computation of the maximized cumulants
-        kstar = [_kappamstar(kappa[:2], i, xi) for i in range(2, 7)]
-        k3star = kstar[1]
-
-        # variance of third cumulant (from Stuart & Ord)
-        sigmak3star = math.sqrt(
-            kstar[4] / L + 9 * (kstar[2] * kstar[0] + kstar[1] ** 2) /
-            (L - 1) + 6 * L * kstar[0] ** 3 / ((L - 1) * (L - 2)))
-        # computation of the p-value (the third cumulant is supposed to
-        # be gaussian distributed)
-        p = 1 - scipy.stats.norm(k3star, sigmak3star).cdf(kappa[2])
-        return p
-
-
-def _kappamstar(kappa, m, xi):
-    '''
-    Computes maximized cumulant of order m
-
-    Parameters
-    ----------
-    kappa : list
-        The first two cumulants of the data
-    xi : int
-        The :math:`\\xi` for which the p-value of :math:`H_0` is computed
-    m : float
-        The order of the cumulant
-
-    Returns
-    -------
-    kappa_out : float
-        The maximized cumulant of order m
-    '''
-
-    if xi == 1:
-        kappa_out = kappa[1]
-    else:
-        kappa_out = \
-            (kappa[1] * (xi ** (m - 1) - 1) -
-                kappa[0] * (xi ** (m - 1) - xi)) / (xi - 1)
-    return kappa_out
-
-
-def _kstat(data):
-    '''
-    Compute the first three cumulants of the population count of a population
-    of spiking neurons.
-    See http://mathworld.wolfram.com/k-Statistic.html
-
-    Parameters
-    ----------
-    data : numpy.ndarray
-        The population histogram of the population on which are computed
-        the cumulants
-
-    Returns
-    -------
-    moments : list
-        The first three unbiased cumulants of the population count
-    '''
-    if len(data) == 0:
-        raise ValueError('The input data must be a non-empty array')
-    moments = [scipy.stats.kstat(data, n=n) for n in [1, 2, 3]]
-    return moments
+/annex/objects/MD5-s6977--a4e7b3d770f7cecf20d91a17f5d15c55

+ 1 - 348
code/elephant/elephant/current_source_density.py

@@ -1,348 +1 @@
-# -*- coding: utf-8 -*-
-"""'Current Source Density analysis (CSD) is a class of methods of analysis of
-extracellular electric potentials recorded at multiple sites leading to
-estimates of current sources generating the measured potentials. It is usually
-applied to low-frequency part of the potential (called the Local Field
-Potential, LFP) and to simultaneous recordings or to recordings taken with
-fixed time reference to the onset of specific stimulus (Evoked Potentials)'
-(Definition by Prof.Daniel K. Wójcik for Encyclopedia of Computational
-Neuroscience)
-
-CSD is also called Source Localization or Source Imaging in EEG circles.
-Here are CSD methods for different types of electrode configurations.
-
-1D - laminar probe like electrodes.
-2D - Microelectrode Array like
-3D - UtahArray or multiple laminar probes.
-
-The following methods have been implemented so far
-
-1D - StandardCSD, DeltaiCSD, SplineiCSD, StepiCSD, KCSD1D
-2D - KCSD2D, MoIKCSD (Saline layer on top of slice)
-3D - KCSD3D
-
-Each of the listed methods has its advantages. The KCSD methods, for
-instance, can handle broken or irregular electrode configurations.
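-
-A minimal usage sketch (untested here; `estimate_csd` and its arguments are
-documented below, and the electrode coordinates are hypothetical):
-
-    >>> import neo
-    >>> import numpy as np
-    >>> import quantities as pq
-    >>> from elephant.current_source_density import estimate_csd
-    >>> lfp = neo.AnalogSignal(np.random.rand(1000, 8), units='mV',
-    ...                        sampling_rate=1 * pq.kHz)
-    >>> coordinates = [[i * 0.1] * pq.mm for i in range(8)]
-    >>> csd = estimate_csd(lfp, coordinates=coordinates,
-    ...                    method='StandardCSD')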
-
-Keywords: LFP; CSD; Multielectrode; Laminar electrode; Barrel cortex
-
-Citation Policy: See ./current_source_density_src/README.md
-
-Contributors to this current source density estimation module are:
-Chaitanya Chintaluri (CC), Espen Hagen (EH) and Michał Czerwinski (MC).
-EH implemented the iCSD methods and StandardCSD
-CC implemented the kCSD methods, kCSD1D(MC and CC)
-CC and EH developed the interface to elephant.
-"""
-
-from __future__ import division, print_function, unicode_literals
-
-import neo
-import numpy as np
-import quantities as pq
-from scipy.integrate import simps
-
-import elephant.current_source_density_src.utility_functions as utils
-from elephant.current_source_density_src import KCSD, icsd
-from elephant.utils import deprecated_alias
-
-__all__ = [
-    "estimate_csd",
-    "generate_lfp"
-]
-
-utils.patch_quantities()
-
-available_1d = ['StandardCSD', 'DeltaiCSD', 'StepiCSD', 'SplineiCSD', 'KCSD1D']
-available_2d = ['KCSD2D', 'MoIKCSD']
-available_3d = ['KCSD3D']
-
-kernel_methods = ['KCSD1D', 'KCSD2D', 'KCSD3D', 'MoIKCSD']
-icsd_methods = ['DeltaiCSD', 'StepiCSD', 'SplineiCSD']
-
-py_iCSD_toolbox = ['StandardCSD'] + icsd_methods
-
-
-@deprecated_alias(coords='coordinates')
-def estimate_csd(lfp, coordinates=None, method=None,
-                 process_estimate=True, **kwargs):
-    """
-    Function call to compute the current source density (CSD) from
-    extracellular potential recordings(local-field potentials - LFP) using
-    laminar electrodes or multi-contact electrodes with 2D or 3D geometries.
-
-    Parameters
-    ----------
-    lfp : neo.AnalogSignal
-        the LFP signal; positions of electrodes can be attached as a
-        neo.RecordingChannel coordinate or passed externally via the
-        `coordinates` argument (see below)
-    coordinates : list of quantities, optional
-        corresponding spatial coordinates of the electrodes.
-        Defaults to None, in which case the coordinates are looked up on
-        the ChannelIndex of `lfp`
-    method : string
-        Pick a method corresponding to the setup, in this implementation
-        For Laminar probe style (1D), use 'KCSD1D' or 'StandardCSD',
-         or 'DeltaiCSD' or 'StepiCSD' or 'SplineiCSD'
-        For MEA probe style (2D),  use 'KCSD2D', or 'MoIKCSD'
-        For array of laminar probes (3D), use 'KCSD3D'
-        Defaults to None
-    process_estimate : bool
-        In the py_iCSD_toolbox this corresponds to the filter_csd -
-        the parameters are passed as kwargs here ie., f_type and f_order
-        In the kcsd methods this corresponds to cross_validate -
-        the parameters are passed as kwargs here ie., lambdas and Rs
-        Defaults to True
-    kwargs : parameters to each method
-        The parameters corresponding to the method chosen
-        See the documentation of the individual method
-        Default is {}, which picks the best parameters.
-
-    Returns
-    -------
-    Estimated CSD
-       neo.AnalogSignal object
-       annotated with the spatial coordinates
-
-    Raises
-    ------
-    AttributeError
-        No units specified for electrode spatial coordinates
-    ValueError
-        Invalid function arguments, wrong method name, or
-        mismatching coordinates
-    TypeError
-        Invalid cv_param argument passed
-    """
-    if not isinstance(lfp, neo.AnalogSignal):
-        raise TypeError('Parameter `lfp` must be a neo.AnalogSignal object')
-    if coordinates is None:
-        coordinates = lfp.channel_index.coordinates
-    else:
-        scaled_coords = []
-        for coord in coordinates:
-            try:
-                scaled_coords.append(coord.rescale(pq.mm))
-            except AttributeError:
-                raise AttributeError('No units given for electrode spatial \
-                coordinates')
-        coordinates = scaled_coords
-    if method is None:
-        raise ValueError('Must specify a method of CSD implementation')
-    if len(coordinates) != lfp.shape[1]:
-        raise ValueError('Number of signals and coordinates is not the same')
-    for ii in coordinates:  # CHECK for Dimensionality of electrodes
-        if len(ii) > 3:
-            raise ValueError('Invalid number of coordinate positions')
-    dim = len(coordinates[0])  # TODO : Generic co-ordinates!
-    if dim == 1 and (method not in available_1d):
-        raise ValueError('Invalid method, Available options are:',
-                         available_1d)
-    if dim == 2 and (method not in available_2d):
-        raise ValueError('Invalid method, Available options are:',
-                         available_2d)
-    if dim == 3 and (method not in available_3d):
-        raise ValueError('Invalid method, Available options are:',
-                         available_3d)
-    if method in kernel_methods:
-        input_array = np.zeros((len(lfp), lfp[0].magnitude.shape[0]))
-        for ii, jj in enumerate(lfp):
-            input_array[ii, :] = jj.rescale(pq.mV).magnitude
-        kernel_method = getattr(KCSD, method)  # fetch the class 'KCSD1D'
-        lambdas = kwargs.pop('lambdas', None)
-        Rs = kwargs.pop('Rs', None)
-        k = kernel_method(np.array(coordinates), input_array.T, **kwargs)
-        if process_estimate:
-            k.cross_validate(lambdas, Rs)
-        estm_csd = k.values()
-        estm_csd = np.rollaxis(estm_csd, -1, 0)
-        output = neo.AnalogSignal(estm_csd * pq.uA / pq.mm**3,
-                                  t_start=lfp.t_start,
-                                  sampling_rate=lfp.sampling_rate)
-
-        if dim == 1:
-            output.annotate(x_coords=k.estm_x)
-        elif dim == 2:
-            output.annotate(x_coords=k.estm_x, y_coords=k.estm_y)
-        elif dim == 3:
-            output.annotate(x_coords=k.estm_x, y_coords=k.estm_y,
-                            z_coords=k.estm_z)
-    elif method in py_iCSD_toolbox:
-
-        coordinates = np.array(coordinates) * coordinates[0].units
-
-        if method in icsd_methods:
-            try:
-                coordinates = coordinates.rescale(kwargs['diam'].units)
-            except KeyError:  # Then why specify as a default in icsd?
-                # All iCSD methods explicitly assume a source
-                # diameter in contrast to the stdCSD  that
-                # implicitly assume infinite source radius
-                raise ValueError("Parameter diam must be specified for iCSD \
-                                  methods: {}".format(", ".join(icsd_methods)))
-
-        if 'f_type' in kwargs:
-            if (kwargs['f_type'] != 'identity') and  \
-               (kwargs['f_order'] is None):
-                raise ValueError("The order of {} filter must be \
-                                  specified".format(kwargs['f_type']))
-
-        lfp = neo.AnalogSignal(np.asarray(lfp).T, units=lfp.units,
-                               sampling_rate=lfp.sampling_rate)
-        csd_method = getattr(icsd, method)  # fetch class from icsd.py file
-        csd_estimator = csd_method(lfp=lfp.magnitude * lfp.units,
-                                   coord_electrode=coordinates.flatten(),
-                                   **kwargs)
-        csd_pqarr = csd_estimator.get_csd()
-
-        if process_estimate:
-            csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr)
-            output = neo.AnalogSignal(csd_pqarr_filtered.T,
-                                      t_start=lfp.t_start,
-                                      sampling_rate=lfp.sampling_rate)
-        else:
-            output = neo.AnalogSignal(csd_pqarr.T, t_start=lfp.t_start,
-                                      sampling_rate=lfp.sampling_rate)
-        output.annotate(x_coords=coordinates)
-    return output
-
-
-@deprecated_alias(ele_xx='x_positions', ele_yy='y_positions',
-                  ele_zz='z_positions', xlims='x_limits', ylims='y_limits',
-                  zlims='z_limits', res='resolution')
-def generate_lfp(csd_profile, x_positions, y_positions=None, z_positions=None,
-                 x_limits=[0., 1.], y_limits=[0., 1.], z_limits=[0., 1.],
-                 resolution=50):
-    """
-    Forward modelling for getting the potentials for testing Current Source
-    Density (CSD).
-
-    Parameters
-    ----------
-    csd_profile : callable
-        A function that computes true CSD profile.
-        Available options are (see ./csd/utility_functions.py)
-        1D : gauss_1d_dipole
-        2D : large_source_2D and small_source_2D
-        3D : gauss_3d_dipole
-    x_positions : np.ndarray
-        Positions of the x coordinates of the electrodes
-    y_positions : np.ndarray, optional
-        Positions of the y coordinates of the electrodes
-        Defaults to None, use in 2D or 3D cases only
-    z_positions : np.ndarray, optional
-        Positions of the z coordinates of the electrodes
-        Defaults to None, use in 3D case only
-    x_limits : list, optional
-        A list of [start, end].
-        The starting spatial coordinate and the ending for integration
-        Defaults to [0.,1.]
-    y_limits : list, optional
-        A list of [start, end].
-        The starting spatial coordinate and the ending for integration
-        Defaults to [0.,1.], use only in 2D and 3D case
-    z_limits : list, optional
-        A list of [start, end].
-        The starting spatial coordinate and the ending for integration
-        Defaults to [0.,1.], use only in 3D case
-    resolution : int, optional
-        The resolution of the integration
-        Defaults to 50
-
-    Returns
-    -------
-    LFP : neo.AnalogSignal
-       The potentials created by the csd profile at the electrode positions.
-       The electrode positions are attached as RecordingChannel's coordinate.
-    """
-    def integrate_1D(x0, csd_x, csd, h):
-        m = np.sqrt((csd_x - x0)**2 + h**2) - abs(csd_x - x0)
-        y = csd * m
-        I = simps(y, csd_x)
-        return I
-
-    def integrate_2D(x, y, xlin, ylin, csd, h, X, Y):
-        Ny = ylin.shape[0]
-        m = np.sqrt((x - X)**2 + (y - Y)**2)
-        m[m < 0.0000001] = 0.0000001
-        y = np.arcsinh(2 * h / m) * csd
-        I = np.zeros(Ny)
-        for i in range(Ny):
-            I[i] = simps(y[:, i], ylin)
-        F = simps(I, xlin)
-        return F
-
-    def integrate_3D(x, y, z, xlim, ylim, zlim, csd, xlin, ylin, zlin,
-                     X, Y, Z):
-        Nz = zlin.shape[0]
-        Ny = ylin.shape[0]
-        m = np.sqrt((x - X)**2 + (y - Y)**2 + (z - Z)**2)
-        m[m < 0.0000001] = 0.0000001
-        z = csd / m
-        Iy = np.zeros(Ny)
-        for j in range(Ny):
-            Iz = np.zeros(Nz)
-            for i in range(Nz):
-                Iz[i] = simps(z[:, j, i], zlin)
-            Iy[j] = simps(Iz, ylin)
-        F = simps(Iy, xlin)
-        return F
-    dim = 1
-    if z_positions is not None:
-        dim = 3
-    elif y_positions is not None:
-        dim = 2
-    x = np.linspace(x_limits[0], x_limits[1], resolution)
-    if dim >= 2:
-        y = np.linspace(y_limits[0], y_limits[1], resolution)
-    if dim == 3:
-        z = np.linspace(z_limits[0], z_limits[1], resolution)
-    sigma = 1.0
-    h = 50.
-    pots = np.zeros(len(x_positions))
-    if dim == 1:
-        chrg_x = np.linspace(x_limits[0], x_limits[1], resolution)
-        csd = csd_profile(chrg_x)
-        for ii in range(len(x_positions)):
-            pots[ii] = integrate_1D(x_positions[ii], chrg_x, csd, h)
-        pots /= 2. * sigma  # eq.: 26 from Potworowski et al
-        ele_pos = x_positions
-    elif dim == 2:
-        chrg_x, chrg_y = np.mgrid[
-                         x_limits[0]:x_limits[1]:complex(0, resolution),
-                         y_limits[0]:y_limits[1]:complex(0, resolution)]
-        csd = csd_profile(chrg_x, chrg_y)
-        for ii in range(len(x_positions)):
-            pots[ii] = integrate_2D(x_positions[ii], y_positions[ii],
-                                    x, y, csd, h, chrg_x, chrg_y)
-        pots /= 2 * np.pi * sigma
-        ele_pos = np.vstack((x_positions, y_positions)).T
-    elif dim == 3:
-        chrg_x, chrg_y, chrg_z = np.mgrid[
-            x_limits[0]:x_limits[1]:complex(0, resolution),
-            y_limits[0]:y_limits[1]:complex(0, resolution),
-            z_limits[0]:z_limits[1]:complex(0, resolution)
-        ]
-        csd = csd_profile(chrg_x, chrg_y, chrg_z)
-        xlin = chrg_x[:, 0, 0]
-        ylin = chrg_y[0, :, 0]
-        zlin = chrg_z[0, 0, :]
-        for ii in range(len(x_positions)):
-            pots[ii] = integrate_3D(x_positions[ii], y_positions[ii],
-                                    z_positions[ii],
-                                    x_limits, y_limits, z_limits, csd,
-                                    xlin, ylin, zlin,
-                                    chrg_x, chrg_y, chrg_z)
-        pots /= 4 * np.pi * sigma
-        ele_pos = np.vstack((x_positions, y_positions, z_positions)).T
-    pots = np.reshape(pots, (-1, 1)) * pq.mV
-    ele_pos = ele_pos * pq.mm
-    lfp = []
-    ch = neo.ChannelIndex(index=range(len(pots)))
-    for ii in range(len(pots)):
-        lfp.append(pots[ii])
-    asig = neo.AnalogSignal(np.array(lfp).T, sampling_rate=pq.kHz, units='mV')
-    ch.coordinates = ele_pos
-    ch.analogsignals.append(asig)
-    ch.create_relationship()
-    return asig
+/annex/objects/MD5-s13697--d824f130e37baf11e140e7cdcb41e752
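
A minimal usage sketch of the deleted forward model (not part of the commit;
import paths assume elephant 0.9, and gauss_1d_dipole comes from the
utility_functions module deleted further down in this diff): generate_lfp()
builds the potentials for a known CSD profile, and the estimate_csd() wrapper
partially shown at the top of this diff inverts them again.

    import numpy as np
    from elephant.current_source_density import generate_lfp, estimate_csd
    from elephant.current_source_density_src.utility_functions import \
        gauss_1d_dipole

    # 16 virtual contacts along a laminar probe, positions in [0.1, 0.9]
    x_positions = np.linspace(0.1, 0.9, 16)
    lfp = generate_lfp(gauss_1d_dipole, x_positions)  # neo.AnalogSignal in mV
    csd = estimate_csd(lfp, method='StandardCSD')     # invert back to a CSD
    print(csd.shape)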

File diff suppressed because it is too large
+ 1 - 1059
code/elephant/elephant/current_source_density_src/KCSD.py


+ 1 - 93
code/elephant/elephant/current_source_density_src/README.md

@@ -1,96 +1 @@
-Here are CSD methods for different electrode configurations.
-
-Keywords: Local field potentials; Current-source density; CSD;
-Multielectrode; Laminar electrode; Barrel cortex
-
-1D - laminar-probe-like electrodes.
-2D - microelectrode-array-like (MEA) electrodes.
-3D - Utah array or multiple laminar probes.
-
-The following methods have been implemented here:
-
-1D - StandardCSD, DeltaiCSD, SplineiCSD, StepiCSD, KCSD1D
-2D - KCSD2D, MoIKCSD (Saline layer on top of slice)
-3D - KCSD3D
-
-Each of the listed methods has its advantages, except StandardCSD, which is
-not recommended. The KCSD methods can handle broken or irregular electrode
-configurations.
-
-iCSD
-----
-Python-implementation of the inverse current source density (iCSD) methods from
-http://software.incf.org/software/csdplotter
-
-The Python iCSD toolbox lives on GitHub as well:
-https://github.com/espenhgn/iCSD
-
-The methods were originally developed by Klas H. Pettersen, as described in:
-Klas H. Pettersen, Anna Devor, Istvan Ulbert, Anders M. Dale, Gaute
-T. Einevoll, Current-source density estimation based on inversion of
-electrostatic forward solution: Effects of finite extent of neuronal activity
-and conductivity discontinuities, Journal of Neuroscience Methods, Volume 154,
-Issues 1-2, 30 June 2006, Pages 116-133, ISSN 0165-0270,
-http://dx.doi.org/10.1016/j.jneumeth.2005.12.005.
-(http://www.sciencedirect.com/science/article/pii/S0165027005004541)
-
-For an example of how to use these methods, see the file demo_icsd.py
-
-KCSD 
----- 
-This is the 1.0 version of the kCSD inverse method proposed in
-
-J. Potworowski, W. Jakuczun, S. Łęski, D. K. Wójcik
-"Kernel Current Source Density Method"
-Neural Computation 24 (2012), 541–575
-
-For citation purposes, if you use this software in published research,
-please cite the following works:
-- kCSD1D - [1, 2]
-- kCSD2D - [1, 3]
-- kCSD3D - [1, 4]
-- MoIkCSD - [1, 3, 5]
-
-[1] Potworowski, J., Jakuczun, W., Łęski, S. & Wójcik, D. (2012) 'Kernel
-current source density method.' Neural Comput 24(2), 541-575.
-
-[2] Pettersen, K. H., Devor, A., Ulbert, I., Dale, A. M. & Einevoll,
-G. T. (2006) 'Current-source density estimation based on inversion of
-electrostatic forward solution: effects of finite extent of neuronal activity
-and conductivity discontinuities.' J Neurosci Methods 154(1-2), 116-133.
-
-[3] Łęski, S., Pettersen, K. H., Tunstall, B., Einevoll, G. T., Gigg, J. &
-Wójcik, D. K. (2011) 'Inverse Current Source Density method in two dimensions:
-Inferring neural activation from multielectrode recordings.' Neuroinformatics
-9(4), 401-425.
-
-[4] Łęski, S., Wójcik, D. K., Tereszczuk, J., Świejkowski, D. A., Kublik, E. &
-Wróbel, A. (2007) 'Inverse current-source density method in 3D: reconstruction
-fidelity, boundary effects, and influence of distant sources.' Neuroinformatics
-5(4), 207-222.
-
-[5] Ness, T. V., Chintaluri, C., Potworowski, J., Łeski, S., Głabska, H.,
-Wójcik, D. K. & Einevoll, G. T. (2015) 'Modelling and Analysis of Electrical
-Potentials Recorded in Microelectrode Arrays (MEAs).' Neuroinformatics 13(4),
-403-426.
-
-For further information on kernel methods of CSD, please see
-https://github.com/Neuroinflab/kCSD-python
-
-Contact: Prof. Daniel K. Wojcik
-
-Here (https://github.com/Neuroinflab/kCSD-python/tree/master/tests) are
-scripts to compare different KCSD methods with different CSD sources. You can
-play around with the different parameters of the methods.
-
-The implementation is based on the Matlab version at INCF
-(http://software.incf.org/software/kcsd), which is now outdated. A Python
-version based on this was developed by Grzegorz Parka
-(https://github.com/INCF/pykCSD), which is also no longer supported. The
-current version of the KCSD methods in elephant is a mirror of
-https://github.com/Neuroinflab/kCSD-python/commit/8e2ae26b00da7b96884f2192ec9ea612b195ec30
+/annex/objects/MD5-s4060--5550b79ec5549010ebe03e1750e4e4ec
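
To make the README's method table concrete, a hedged sketch (not part of the
commit, and assuming the estimate_csd() wrapper dispatches the method names
listed above): an MEA-like 2D grid goes to KCSD2D, or MoIKCSD for a slice
under saline. It reuses the utility functions deleted later in this diff.

    import numpy as np
    from elephant.current_source_density import generate_lfp, estimate_csd
    from elephant.current_source_density_src.utility_functions import \
        generate_electrodes, small_source_2D

    # 5 x 5 virtual MEA; generate_lfp() infers 2D from y_positions
    ele_x, ele_y = generate_electrodes(dim=2, res=5)
    lfp = generate_lfp(small_source_2D, ele_x, ele_y)
    csd = estimate_csd(lfp, method='KCSD2D')  # 'MoIKCSD' for slice + saline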

+ 1 - 3
code/elephant/elephant/current_source_density_src/__init__.py

@@ -1,3 +1 @@
-# -*- coding: utf-8 -*-
-from . import icsd
-from . import KCSD
+/annex/objects/MD5-s62--ef426c2eb493ca9c45dd0e434f664de8
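
The two deleted imports are what made both solver modules reachable at
package level, i.e.:

    from elephant.current_source_density_src import icsd, KCSD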

+ 1 - 201
code/elephant/elephant/current_source_density_src/basis_functions.py

@@ -1,201 +1 @@
-#!/usr/bin/env python
-"""
-This script is used to generate basis sources for the
-kCSD method (Potworowski et al., 2012) for the 1D, 2D and 3D cases.
-Two 'types' are described here, gaussian and step sources;
-these can be easily extended.
-These scripts are based on Grzegorz Parka's
-Google Summer of Code 2014 project, INCF/pykCSD.
-Written by:
-Michal Czerwinski, Chaitanya Chintaluri
-Laboratory of Neuroinformatics,
-Nencki Institute of Experimental Biology, Warsaw.
-"""
-from __future__ import division
-
-import numpy as np
-
-def gauss(d, stdev, dim):
-    """Gaussian function
-    Parameters
-    ----------
-    d : floats or np.arrays
-        Distance array to the point of evaluation
-    stdev : float
-        cutoff range
-    dim : int
-        dimension of the gaussian function
-    Returns
-    -------
-    Z : floats or np.arrays
-        function evaluated
-    """
-    Z = np.exp(-(d**2) / (2 * stdev**2)) / (np.sqrt(2 * np.pi) * stdev)**dim
-    return Z
-
-def step_1D(d, R):
-    """Returns normalized 1D step function.
-    Parameters
-    ----------
-    d : floats or np.arrays
-        Distance array to the point of evaluation
-    R : float
-        cutoff range
-    Returns
-    -------
-    s : Value of the function (d  <= R) / R
-    """
-    s = (d  <= R)
-    s = s / R #normalize with width
-    return s
-
-def gauss_1D(d, three_stdev):
-    """Returns normalized gaussian 2D scale function
-    Parameters
-    ----------
-    d : floats or np.arrays
-        Distance array to the point of evaluation
-    three_stdev : float
-        3 * standard deviation of the distribution
-    Returns
-    -------
-    Z : exp(-d**2 / (2 * stdev**2)) / (sqrt(2 * pi) * stdev),
-        where stdev = three_stdev / 3
-    """
-    stdev = three_stdev/3.0
-    Z = gauss(d, stdev, 1)
-    return Z
-
-def gauss_lim_1D(d, three_stdev):
-    """Returns gausian 2D function cut off after 3 standard deviations.
-    Parameters
-    ----------
-    d : floats or np.arrays
-        Distance array to the point of evaluation
-    three_stdev : float
-        3 * standard deviation of the distribution
-    Returns
-    -------
-    Z : exp(-d**2 / (2 * stdev**2)) / (sqrt(2 * pi) * stdev),
-        where stdev = three_stdev / 3; cut off beyond d = three_stdev
-    """
-    Z = gauss_1D(d, three_stdev)
-    Z *= (d < three_stdev)
-    return Z
-
-def step_2D(d, R):
-    """Returns normalized 2D step function.
-    Parameters
-    ----------
-    d : float or np.arrays
-        Distance array to the point of evaluation
-    R : float
-        cutoff range
-
-    Returns
-    -------
-    s : step function
-    """
-    s = (d <= R) / (np.pi*(R**2))
-    return s
-
-def gauss_2D(d, three_stdev):
-    """Returns normalized gaussian 2D scale function
-    Parameters
-    ----------
-    d : floats or np.arrays
-         distance at which we need the function evaluated
-    three_stdev : float
-        3 * standard deviation of the distribution
-    Returns
-    -------
-    Z : function
-        Normalized gaussian 2D function
-    """
-    stdev = three_stdev/3.0
-    Z = gauss(d, stdev, 2)
-    return Z
-
-def gauss_lim_2D(d, three_stdev):
-    """Returns gausian 2D function cut off after 3 standard deviations.
-    Parameters
-    ----------
-    d : floats or np.arrays
-         distance at which we need the function evaluated
-    three_stdev : float
-        3 * standard deviation of the distribution
-    Returns
-    -------
-    Z : function
-        Normalized gaussian 2D function cut off after three_stdev
-    """
-    Z = (d <= three_stdev)*gauss_2D(d, three_stdev)
-    return Z
-
-def gauss_3D(d, three_stdev):
-    """Returns normalized gaussian 3D scale function
-    Parameters
-    ----------
-    d : floats or np.arrays
-        distance at which we need the function evaluated
-    three_stdev : float
-        3 * standard deviation of the distribution
-    Returns
-    -------
-    Z : function
-        Normalized gaussian 3D function
-    """
-    stdev = three_stdev/3.0
-    Z = gauss(d, stdev, 3)
-    return Z
-
-def gauss_lim_3D(d, three_stdev):
-    """Returns normalized gaussian 3D scale function cut off after 3stdev
-    Parameters
-    ----------
-    d : floats or np.arrays
-        distance at which we need the function evaluated
-    three_stdev : float
-        3 * standard deviation of the distribution
-    Returns
-    -------
-    Z : function
-        Normalized gaussian 3D function cut off at three_stdev
-    """
-    Z = gauss_3D(d, three_stdev)
-    Z = Z * (d < (three_stdev))
-    return Z
-
-def step_3D(d, R):
-    """Returns normalized 3D step function.
-    Parameters
-    ----------
-    d : floats or np.arrays
-        distance at which we need the function evaluated
-    R : float
-        cutoff range
-    Returns
-    -------
-    s : step function in 3D
-    """
-
-    s = 3/(4*np.pi*R**3)*(d <= R)
-    return s
-
-basis_1D = {
-    "step": step_1D,
-    "gauss": gauss_1D,
-    "gauss_lim": gauss_lim_1D,
-}
-
-
-basis_2D = {
-    "step": step_2D,
-    "gauss": gauss_2D,
-    "gauss_lim": gauss_lim_2D,
-}
-
-basis_3D = {
-    "step": step_3D,
-    "gauss": gauss_3D,
-    "gauss_lim": gauss_lim_3D,
-}
+/annex/objects/MD5-s4939--11d151489698c2d59088922754e83cd1
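
A quick numerical sanity check of the basis sources above (a sketch, not in
the commit; assumes scipy.integrate.simps is available):

    import numpy as np
    from scipy.integrate import simps
    from elephant.current_source_density_src.basis_functions import (
        gauss_1D, gauss_lim_1D)

    x = np.linspace(-6., 6., 10001)
    d = np.abs(x)  # the basis functions take a distance, not a coordinate
    # three_stdev=3.0 means stdev=1.0: the full gaussian integrates to ~1,
    # and the cut version loses only the ~0.3 % of mass beyond 3 stdev
    print(simps(gauss_1D(d, three_stdev=3.0), x))      # ~1.000
    print(simps(gauss_lim_1D(d, three_stdev=3.0), x))  # ~0.997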

+ 1 - 887
code/elephant/elephant/current_source_density_src/icsd.py

@@ -1,887 +1 @@
-# -*- coding: utf-8 -*-
-'''
-py-iCSD toolbox!
-Translation of the core functionality of the CSDplotter MATLAB package
-to python.
-
-The methods were originally developed by Klas H. Pettersen, as described in:
-Klas H. Pettersen, Anna Devor, Istvan Ulbert, Anders M. Dale, Gaute T. Einevoll,
-Current-source density estimation based on inversion of electrostatic forward
-solution: Effects of finite extent of neuronal activity and conductivity
-discontinuities, Journal of Neuroscience Methods, Volume 154, Issues 1-2,
-30 June 2006, Pages 116-133, ISSN 0165-0270,
-http://dx.doi.org/10.1016/j.jneumeth.2005.12.005.
-(http://www.sciencedirect.com/science/article/pii/S0165027005004541)
-
-The methods themselves are implemented as callable subclasses of the base
-CSD class object, which sets some common attributes and provides a basic
-function for calculating the iCSD, as well as a generic spatial filter
-implementation.
-
-The raw and filtered CSD estimates are returned as Quantity arrays.
-
-Requires a pylab environment to work, i.e. numpy+scipy+matplotlib, with the
-addition of quantities (http://pythonhosted.org/quantities) and
-neo (https://pythonhosted.org/neo).
-
-Original implementation from CSDplotter-0.1.1
-(http://software.incf.org/software/csdplotter) by Klas. H. Pettersen 2005.
-
-Written by:
-- Espen.Hagen@umb.no, 2010,
-- e.hagen@fz-juelich.de, 2015-2016
-
-'''
-
-import numpy as np
-import scipy.integrate as si
-import scipy.signal as ss
-import quantities as pq
-
-
-class CSD(object):
-    '''Base iCSD class'''
-    def __init__(self, lfp, f_type='gaussian', f_order=(3, 1)):
-        '''Initialize parent class iCSD
-
-        Parameters
-        ----------
-        lfp : np.ndarray * quantity.Quantity
-            LFP signal of shape (# channels, # time steps)
-        f_type : str
-            type of spatial filter, must be a scipy.signal filter design method
-        f_order : list
-            settings for spatial filter, arg passed to  filter design function
-        '''
-        self.name = 'CSD estimate parent class'
-        self.lfp = lfp
-        self.f_matrix = np.eye(lfp.shape[0]) * pq.m**3 / pq.S
-        self.f_type = f_type
-        self.f_order = f_order
-
-    def get_csd(self):
-        '''
-        Perform the CSD estimate from the LFP and forward matrix F, i.e. as
-        CSD = F**-1 * LFP
-
-        Returns
-        -------
-        csd : np.ndarray * quantity.Quantity
-            Array with the csd estimate
-        '''
-        csd = np.linalg.solve(self.f_matrix, self.lfp)
-
-        return csd * (self.f_matrix.units**-1 * self.lfp.units).simplified
-
-    def filter_csd(self, csd, filterfunction='convolve'):
-        '''
-        Spatial filtering of the CSD estimate, using an N-point filter
-
-        Arguments
-        ---------
-        csd : np.ndarray * quantity.Quantity
-            Array with the csd estimate
-        filterfunction : str
-            'filtfilt' or 'convolve'. Apply spatial filter using
-            scipy.signal.filtfilt or scipy.signal.convolve.
-        '''
-        if self.f_type == 'gaussian':
-            try:
-                assert(len(self.f_order) == 2)
-            except AssertionError:
-                raise AssertionError('filter order f_order must be a tuple '
-                                     'of length 2')
-        else:
-            try:
-                assert(self.f_order > 0 and isinstance(self.f_order, int))
-            except AssertionError:
-                raise AssertionError('Filter order must be int > 0!')
-        try:
-            assert(filterfunction in ['filtfilt', 'convolve'])
-        except AssertionError:
-            raise AssertionError("{} not equal to 'filtfilt' or "
-                                 "'convolve'".format(filterfunction))
-
-        if self.f_type == 'boxcar':
-            num = ss.boxcar(self.f_order)
-            denom = np.array([num.sum()])
-        elif self.f_type == 'hamming':
-            num = ss.hamming(self.f_order)
-            denom = np.array([num.sum()])
-        elif self.f_type == 'triangular':
-            num = ss.triang(self.f_order)
-            denom = np.array([num.sum()])
-        elif self.f_type == 'gaussian':
-            num = ss.gaussian(self.f_order[0], self.f_order[1])
-            denom = np.array([num.sum()])
-        elif self.f_type == 'identity':
-            num = np.array([1.])
-            denom = np.array([1.])
-        else:
-            raise ValueError('%s Wrong filter type!' % self.f_type)
-
-        num_string = '[ '
-        for i in num:
-            num_string = num_string + '%.3f ' % i
-        num_string = num_string + ']'
-        denom_string = '[ '
-        for i in denom:
-            denom_string = denom_string + '%.3f ' % i
-        denom_string = denom_string + ']'
-
-        print(('discrete filter coefficients: \nb = {}, \
-               \na = {}'.format(num_string, denom_string)))
-
-        if filterfunction == 'filtfilt':
-            return ss.filtfilt(num, denom, csd, axis=0) * csd.units
-        elif filterfunction == 'convolve':
-            csdf = csd / csd.units
-            for i in range(csdf.shape[1]):
-                csdf[:, i] = ss.convolve(csdf[:, i], num / denom.sum(), 'same')
-            return csdf * csd.units
-
-
-class StandardCSD(CSD):
-    '''
-    Standard CSD method with and without Vaknin electrodes
-    '''
-
-    def __init__(self, lfp, coord_electrode, **kwargs):
-        '''
-        Initialize standard CSD method class with & without Vaknin electrodes.
-
-        Parameters
-        ----------
-        lfp : np.ndarray * quantity.Quantity
-            LFP signal of shape (# channels, # time steps) in units of V
-        coord_electrode : np.ndarray * quantity.Quantity
-            depth of evenly spaced electrode contact points of shape
-            (# contacts, ) in units of m, must be monotonically increasing
-        sigma : float * quantity.Quantity
-            conductivity of tissue in units of S/m or 1/(ohm*m)
-            Defaults to 0.3 S/m
-        vaknin_el : bool
-            flag for using method of Vaknin to endpoint electrodes
-            Defaults to True
-        f_type : str
-            type of spatial filter, must be a scipy.signal filter design method
-            Defaults to 'gaussian'
-        f_order : list
-            settings for spatial filter, arg passed to  filter design function
-            Defaults to (3,1) for the gaussian
-        '''
-        self.parameters(**kwargs)
-        CSD.__init__(self, lfp, self.f_type, self.f_order)
-
-        diff_diff_coord = np.diff(np.diff(coord_electrode)).magnitude
-        zeros_ddc = np.zeros_like(diff_diff_coord)
-        try:
-            assert(np.all(np.isclose(diff_diff_coord, zeros_ddc, atol=1e-12)))
-        except AssertionError as ae:
-            print('coord_electrode not evenly spaced')
-            raise ae
-
-        if self.vaknin_el:
-            # extend lfps array by duplicating potential at endpoint contacts
-            if lfp.ndim == 1:
-                self.lfp = np.empty((lfp.shape[0] + 2, )) * lfp.units
-            else:
-                self.lfp = np.empty((lfp.shape[0] + 2, lfp.shape[1])) * lfp.units
-            self.lfp[0, ] = lfp[0, ]
-            self.lfp[1:-1, ] = lfp
-            self.lfp[-1, ] = lfp[-1, ]
-        else:
-            self.lfp = lfp
-
-        self.name = 'Standard CSD method'
-        self.coord_electrode = coord_electrode
-
-        self.f_inv_matrix = self.get_f_inv_matrix()
-
-    def parameters(self, **kwargs):
-        '''Defining the default values of the method passed as kwargs
-        Parameters
-        ----------
-        **kwargs
-            Same as those passed to initialize the Class
-        '''
-        self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
-        self.vaknin_el = kwargs.pop('vaknin_el', True)
-        self.f_type = kwargs.pop('f_type', 'gaussian')
-        self.f_order = kwargs.pop('f_order', (3, 1))
-        if kwargs:
-            raise TypeError('Invalid keyword arguments:', kwargs.keys())
-
-    def get_f_inv_matrix(self):
-        '''Calculate the inverse F-matrix for the standard CSD method'''
-        h_val = abs(np.diff(self.coord_electrode)[0])
-        f_inv = -np.eye(self.lfp.shape[0])
-
-        # Inner matrix elements are just the discrete Laplacian coefficients
-        for j in range(1, f_inv.shape[0] - 1):
-            f_inv[j, j - 1: j + 2] = np.array([1., -2., 1.])
-        return f_inv * -self.sigma / h_val
-
-    def get_csd(self):
-        '''
-        Perform the iCSD calculation, i.e: iCSD=F_inv*LFP
-
-        Returns
-        -------
-        csd : np.ndarray * quantity.Quantity
-            Array with the csd estimate
-        '''
-        csd = np.dot(self.f_inv_matrix, self.lfp)[1:-1, ]
-        # `np.dot()` does not return correct units, so the units of `csd` must
-        # be assigned manually
-        csd_units = (self.f_inv_matrix.units * self.lfp.units).simplified
-        csd = csd.magnitude * csd_units
-
-        return csd
-
-
-class DeltaiCSD(CSD):
-    '''
-    delta-iCSD method
-    '''
-    def __init__(self, lfp, coord_electrode, **kwargs):
-        '''
-        Initialize the delta-iCSD method class object
-
-        Parameters
-        ----------
-        lfp : np.ndarray * quantity.Quantity
-            LFP signal of shape (# channels, # time steps) in units of V
-        coord_electrode : np.ndarray * quantity.Quantity
-            depth of evenly spaced electrode contact points of shape
-            (# contacts, ) in units of m
-        diam : float * quantity.Quantity
-            diameter of the assumed circular planar current sources centered
-            at each contact
-            Defaults to 500E-6 meters
-        sigma : float * quantity.Quantity
-            conductivity of tissue in units of S/m or 1/(ohm*m)
-            Defaults to 0.3 S / m
-        sigma_top : float * quantity.Quantity
-            conductivity on top of tissue in units of S/m or 1/(ohm*m)
-            Defaults to 0.3 S / m
-        f_type : str
-            type of spatial filter, must be a scipy.signal filter design method
-            Defaults to 'gaussian'
-        f_order : list
-            settings for spatial filter, arg passed to  filter design function
-            Defaults to (3,1) for gaussian
-        '''
-        self.parameters(**kwargs)
-        CSD.__init__(self, lfp, self.f_type, self.f_order)
-
-        try:  # Should the class not take care of this?!
-            assert(self.diam.units == coord_electrode.units)
-        except AssertionError as ae:
-            print('units of coord_electrode ({}) and diam ({}) differ'
-                  .format(coord_electrode.units, self.diam.units))
-            raise ae
-
-        try:
-            assert(np.all(np.diff(coord_electrode) > 0))
-        except AssertionError as ae:
-            print('values of coord_electrode not monotonically increasing')
-            raise ae
-
-        try:
-            assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
-            if self.diam.size == coord_electrode.size:
-                assert(np.all(self.diam > 0 * self.diam.units))
-            else:
-                assert(self.diam > 0 * self.diam.units)
-        except AssertionError as ae:
-            print('diam must be positive scalar or of same shape \
-                   as coord_electrode')
-            raise ae
-        if self.diam.size == 1:
-            self.diam = np.ones(coord_electrode.size) * self.diam
-
-        self.name = 'delta-iCSD method'
-        self.coord_electrode = coord_electrode
-
-        # initialize F- and iCSD-matrices
-        self.f_matrix = np.empty((self.coord_electrode.size,
-                                  self.coord_electrode.size))
-        self.f_matrix = self.get_f_matrix()
-
-    def parameters(self, **kwargs):
-        '''Defining the default values of the method passed as kwargs
-        Parameters
-        ----------
-        **kwargs
-            Same as those passed to initialize the Class
-        '''
-        self.diam = kwargs.pop('diam', 500E-6 * pq.m)
-        self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
-        self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
-        self.f_type = kwargs.pop('f_type', 'gaussian')
-        self.f_order = kwargs.pop('f_order', (3, 1))
-        if kwargs:
-            raise TypeError('Invalid keyword arguments:', kwargs.keys())
-
-    def get_f_matrix(self):
-        '''Calculate the F-matrix'''
-        f_matrix = np.empty((self.coord_electrode.size,
-                             self.coord_electrode.size)) * self.coord_electrode.units
-        for j in range(self.coord_electrode.size):
-            for i in range(self.coord_electrode.size):
-                f_matrix[j, i] = ((np.sqrt((self.coord_electrode[j] -
-                                            self.coord_electrode[i])**2 +
-                    (self.diam[j] / 2)**2) - abs(self.coord_electrode[j] -
-                                                 self.coord_electrode[i])) +
-                    (self.sigma - self.sigma_top) / (self.sigma +
-                                                     self.sigma_top) *
-                    (np.sqrt((self.coord_electrode[j] +
-                              self.coord_electrode[i])**2 + (self.diam[j] / 2)**2)-
-                    abs(self.coord_electrode[j] + self.coord_electrode[i])))
-
-        f_matrix /= (2 * self.sigma)
-        return f_matrix
-
-
-class StepiCSD(CSD):
-    '''step-iCSD method'''
-    def __init__(self, lfp, coord_electrode, **kwargs):
-
-        '''
-        Initializing step-iCSD method class object
-
-        Parameters
-        ----------
-        lfp : np.ndarray * quantity.Quantity
-            LFP signal of shape (# channels, # time steps) in units of V
-        coord_electrode : np.ndarray * quantity.Quantity
-            depth of evenly spaced electrode contact points of shape
-            (# contacts, ) in units of m
-        diam : float or np.ndarray * quantity.Quantity
-            diameter(s) of the assumed circular planar current sources centered
-            at each contact
-            Defaults to 500E-6 meters
-        h : float or np.ndarray * quantity.Quantity
-            assumed thickness of the source cylinders at all or each contact
-            Defaults to np.ones(23) * 100E-6 * pq.m
-        sigma : float * quantity.Quantity
-            conductivity of tissue in units of S/m or 1/(ohm*m)
-            Defaults to 0.3 S / m
-        sigma_top : float * quantity.Quantity
-            conductivity on top of tissue in units of S/m or 1/(ohm*m)
-            Defaults to 0.3 S / m
-        tol : float
-            tolerance of numerical integration
-            Defaults to 1e-6
-        f_type : str
-            type of spatial filter, must be a scipy.signal filter design method
-            Defaults to 'gaussian'
-        f_order : list
-            settings for spatial filter, arg passed to  filter design function
-            Defaults to (3,1) for the gaussian
-        '''
-        self.parameters(**kwargs)
-        CSD.__init__(self, lfp, self.f_type, self.f_order)
-
-        try:  # Should the class not take care of this?
-            assert(self.diam.units == coord_electrode.units)
-        except AssertionError as ae:
-            print('units of coord_electrode ({}) and diam ({}) differ'
-                  .format(coord_electrode.units, self.diam.units))
-            raise ae
-        try:
-            assert(np.all(np.diff(coord_electrode) > 0))
-        except AssertionError as ae:
-            print('values of coord_electrode not monotonically increasing')
-            raise ae
-
-        try:
-            assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
-            if self.diam.size == coord_electrode.size:
-                assert(np.all(self.diam > 0 * self.diam.units))
-            else:
-                assert(self.diam > 0 * self.diam.units)
-        except AssertionError as ae:
-            print('diam must be positive scalar or of same shape \
-                   as coord_electrode')
-            raise ae
-        if self.diam.size == 1:
-            self.diam = np.ones(coord_electrode.size) * self.diam
-        try:
-            assert(self.h.size == 1 or self.h.size == coord_electrode.size)
-            if self.h.size == coord_electrode.size:
-                assert(np.all(self.h > 0 * self.h.units))
-        except AssertionError as ae:
-            print('h must be scalar or of same shape as coord_electrode')
-            raise ae
-        if self.h.size == 1:
-            self.h = np.ones(coord_electrode.size) * self.h
-
-        self.name = 'step-iCSD method'
-        self.coord_electrode = coord_electrode
-
-        # compute forward-solution matrix
-        self.f_matrix = self.get_f_matrix()
-
-    def parameters(self, **kwargs):
-        '''Defining the default values of the method passed as kwargs
-        Parameters
-        ----------
-        **kwargs
-            Same as those passed to initialize the Class
-        '''
-
-        self.diam = kwargs.pop('diam', 500E-6 * pq.m)
-        self.h = kwargs.pop('h', np.ones(23) * 100E-6 * pq.m)
-        self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
-        self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
-        self.tol = kwargs.pop('tol', 1e-6)
-        self.f_type = kwargs.pop('f_type', 'gaussian')
-        self.f_order = kwargs.pop('f_order', (3, 1))
-        if kwargs:
-            raise TypeError('Invalid keyword arguments:', kwargs.keys())
-
-    def get_f_matrix(self):
-        '''Calculate F-matrix for step iCSD method'''
-        el_len = self.coord_electrode.size
-        f_matrix = np.zeros((el_len, el_len))
-        for j in range(el_len):
-            for i in range(el_len):
-                lower_int = self.coord_electrode[i] - self.h[j] / 2
-                if lower_int < 0:
-                    lower_int = self.h[j].units
-                upper_int = self.coord_electrode[i] + self.h[j] / 2
-
-                # components of f_matrix object
-                f_cyl0 = si.quad(self._f_cylinder,
-                                 a=lower_int, b=upper_int,
-                                 args=(float(self.coord_electrode[j]),
-                                       float(self.diam[j]),
-                                       float(self.sigma)),
-                                 epsabs=self.tol)[0]
-                f_cyl1 = si.quad(self._f_cylinder, a=lower_int, b=upper_int,
-                                 args=(-float(self.coord_electrode[j]),
-                                       float(self.diam[j]), float(self.sigma)),
-                                 epsabs=self.tol)[0]
-
-                # method of images coefficient
-                mom = (self.sigma - self.sigma_top) / (self.sigma + self.sigma_top)
-
-                f_matrix[j, i] = f_cyl0 + mom * f_cyl1
-
-        # assume si.quad trashes the units
-        return f_matrix * self.h.units**2 / self.sigma.units
-
-    def _f_cylinder(self, zeta, z_val, diam, sigma):
-        '''function used by class method'''
-        f_cyl = 1. / (2. * sigma) * \
-            (np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
-        return f_cyl
-
-
-class SplineiCSD(CSD):
-    '''spline iCSD method'''
-    def __init__(self, lfp, coord_electrode, **kwargs):
-
-        '''
-        Initializing spline-iCSD method class object
-
-        Parameters
-        ----------
-        lfp : np.ndarray * quantity.Quantity
-            LFP signal of shape (# channels, # time steps) in units of V
-        coord_electrode : np.ndarray * quantity.Quantity
-            depth of evenly spaced electrode contact points of shape
-            (# contacts, ) in units of m
-        diam : float * quantity.Quantity
-            diameter of the assumed circular planar current sources centered
-            at each contact
-            Defaults to 500E-6 meters
-        sigma : float * quantity.Quantity
-            conductivity of tissue in units of S/m or 1/(ohm*m)
-            Defaults to 0.3 S / m
-        sigma_top : float * quantity.Quantity
-            conductivity on top of tissue in units of S/m or 1/(ohm*m)
-            Defaults to 0.3 S / m
-        tol : float
-            tolerance of numerical integration
-            Defaults to 1e-6
-        f_type : str
-            type of spatial filter, must be a scipy.signal filter design method
-            Defaults to 'gaussian'
-        f_order : list
-            settings for spatial filter, arg passed to  filter design function
-            Defaults to (3,1) for the gaussian
-        num_steps : int
-            number of data points for the spatially upsampled LFP/CSD data
-            Defaults to 200
-        '''
-        self.parameters(**kwargs)
-        CSD.__init__(self, lfp, self.f_type, self.f_order)
-
-        try:  # Should the class not take care of this?!
-            assert(self.diam.units == coord_electrode.units)
-        except AssertionError as ae:
-            print('units of coord_electrode ({}) and diam ({}) differ'
-                  .format(coord_electrode.units, self.diam.units))
-            raise
-        try:
-            assert(np.all(np.diff(coord_electrode) > 0))
-        except AssertionError as ae:
-            print('values of coord_electrode not monotonically increasing')
-            raise ae
-
-        try:
-            assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
-            if self.diam.size == coord_electrode.size:
-                assert(np.all(self.diam > 0 * self.diam.units))
-        except AssertionError as ae:
-            print('diam must be scalar or of same shape as coord_electrode')
-            raise ae
-        if self.diam.size == 1:
-            self.diam = np.ones(coord_electrode.size) * self.diam
-
-        self.name = 'spline-iCSD method'
-        self.coord_electrode = coord_electrode
-
-        # compute stuff
-        self.f_matrix = self.get_f_matrix()
-
-    def parameters(self, **kwargs):
-        '''Defining the default values of the method passed as kwargs
-        Parameters
-        ----------
-        **kwargs
-            Same as those passed to initialize the Class
-        '''
-        self.diam = kwargs.pop('diam', 500E-6 * pq.m)
-        self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
-        self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
-        self.tol = kwargs.pop('tol', 1e-6)
-        self.num_steps = kwargs.pop('num_steps', 200)
-        self.f_type = kwargs.pop('f_type', 'gaussian')
-        self.f_order = kwargs.pop('f_order', (3, 1))
-        if kwargs:
-            raise TypeError('Invalid keyword arguments:', kwargs.keys())
-
-    def get_f_matrix(self):
-        '''Calculate the F-matrix for cubic spline iCSD method'''
-        el_len = self.coord_electrode.size
-        z_js = np.zeros(el_len + 1)
-        z_js[:-1] = np.array(self.coord_electrode)
-        z_js[-1] = z_js[-2] + float(np.diff(self.coord_electrode).mean())
-
-        # Define integration matrices
-        f_mat0 = np.zeros((el_len, el_len + 1))
-        f_mat1 = np.zeros((el_len, el_len + 1))
-        f_mat2 = np.zeros((el_len, el_len + 1))
-        f_mat3 = np.zeros((el_len, el_len + 1))
-
-        # Calc. elements
-        for j in range(el_len):
-            for i in range(el_len):
-                f_mat0[j, i] = si.quad(self._f_mat0, a=z_js[i], b=z_js[i + 1],
-                                       args=(z_js[j + 1],
-                                             float(self.sigma),
-                                             float(self.diam[j])),
-                                       epsabs=self.tol)[0]
-                f_mat1[j, i] = si.quad(self._f_mat1, a=z_js[i], b=z_js[i + 1],
-                                       args=(z_js[j + 1], z_js[i],
-                                             float(self.sigma),
-                                             float(self.diam[j])),
-                                       epsabs=self.tol)[0]
-                f_mat2[j, i] = si.quad(self._f_mat2, a=z_js[i], b=z_js[i + 1],
-                                       args=(z_js[j + 1], z_js[i],
-                                             float(self.sigma),
-                                             float(self.diam[j])),
-                                       epsabs=self.tol)[0]
-                f_mat3[j, i] = si.quad(self._f_mat3, a=z_js[i], b=z_js[i + 1],
-                                       args=(z_js[j + 1], z_js[i],
-                                             float(self.sigma),
-                                             float(self.diam[j])),
-                                       epsabs=self.tol)[0]
-
-                # image technique if conductivity not constant:
-                if self.sigma != self.sigma_top:
-                    f_mat0[j, i] = f_mat0[j, i] + (self.sigma-self.sigma_top) / \
-                                                (self.sigma + self.sigma_top) * \
-                            si.quad(self._f_mat0, a=z_js[i], b=z_js[i+1], \
-                                    args=(-z_js[j+1],
-                                          float(self.sigma), float(self.diam[j])), \
-                                    epsabs=self.tol)[0]
-                    f_mat1[j, i] = f_mat1[j, i] + (self.sigma-self.sigma_top) / \
-                        (self.sigma + self.sigma_top) * \
-                            si.quad(self._f_mat1, a=z_js[i], b=z_js[i+1], \
-                                args=(-z_js[j+1], z_js[i], float(self.sigma),
-                                      float(self.diam[j])), epsabs=self.tol)[0]
-                    f_mat2[j, i] = f_mat2[j, i] + (self.sigma-self.sigma_top) / \
-                        (self.sigma + self.sigma_top) * \
-                            si.quad(self._f_mat2, a=z_js[i], b=z_js[i+1], \
-                                args=(-z_js[j+1], z_js[i], float(self.sigma),
-                                      float(self.diam[j])), epsabs=self.tol)[0]
-                    f_mat3[j, i] = f_mat3[j, i] + (self.sigma-self.sigma_top) / \
-                        (self.sigma + self.sigma_top) * \
-                            si.quad(self._f_mat3, a=z_js[i], b=z_js[i+1], \
-                                args=(-z_js[j+1], z_js[i], float(self.sigma),
-                                      float(self.diam[j])), epsabs=self.tol)[0]
-
-        e_mat0, e_mat1, e_mat2, e_mat3 = self._calc_e_matrices()
-
-        # Calculate the F-matrix
-        f_matrix = np.eye(el_len + 2)
-        f_matrix[1:-1, :] = np.dot(f_mat0, e_mat0) + \
-                            np.dot(f_mat1, e_mat1) + \
-                            np.dot(f_mat2, e_mat2) + \
-                            np.dot(f_mat3, e_mat3)
-
-        return f_matrix * self.coord_electrode.units**2 / self.sigma.units
-
-    def get_csd(self):
-        '''
-        Calculate the iCSD using the spline iCSD method
-
-        Returns
-        -------
-        csd : np.ndarray * quantity.Quantity
-            Array with csd estimate
-
-
-        '''
-        e_mat = self._calc_e_matrices()
-
-        el_len = self.coord_electrode.size
-
-        # padding the lfp with zeros on top/bottom
-        if self.lfp.ndim == 1:
-            cs_lfp = np.r_[[0], np.asarray(self.lfp), [0]].reshape(1, -1).T
-            csd = np.zeros(self.num_steps)
-        else:
-            cs_lfp = np.vstack((np.zeros(self.lfp.shape[1]),
-                                np.asarray(self.lfp),
-                                np.zeros(self.lfp.shape[1])))
-            csd = np.zeros((self.num_steps, self.lfp.shape[1]))
-        cs_lfp *= self.lfp.units
-
-        # CSD coefficients
-        csd_coeff = np.linalg.solve(self.f_matrix, cs_lfp)
-
-        # The cubic spline polynomial coefficients
-        a_mat0 = np.dot(e_mat[0], csd_coeff)
-        a_mat1 = np.dot(e_mat[1], csd_coeff)
-        a_mat2 = np.dot(e_mat[2], csd_coeff)
-        a_mat3 = np.dot(e_mat[3], csd_coeff)
-
-        # Extend electrode coordinates at both ends by min contact spacing
-        h = np.diff(self.coord_electrode).min()
-        z_js = np.zeros(el_len + 2)
-        z_js[0] = self.coord_electrode[0] - h
-        z_js[1: -1] = self.coord_electrode
-        z_js[-1] = self.coord_electrode[-1] + h
-
-        # create high res spatial grid
-        out_zs = np.linspace(z_js[1], z_js[-2], self.num_steps)
-
-        # Calculate iCSD estimate on grid from polynomial coefficients.
-        i = 0
-        for j in range(self.num_steps):
-            if out_zs[j] >= z_js[i + 1]:
-                i += 1
-            csd[j, ] = a_mat0[i, :] + a_mat1[i, :] * \
-                             (out_zs[j] - z_js[i]) + \
-                a_mat2[i, :] * (out_zs[j] - z_js[i])**2 + \
-                a_mat3[i, :] * (out_zs[j] - z_js[i])**3
-
-        csd_unit = (self.f_matrix.units**-1 * self.lfp.units).simplified
-
-        return csd * csd_unit
-
-    def _f_mat0(self, zeta, z_val, sigma, diam):
-        '''0th-order potential function'''
-        return 1. / (2. * sigma) * \
-            (np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
-
-    def _f_mat1(self, zeta, z_val, zi_val, sigma, diam):
-        '''1st-order potential function'''
-        return (zeta - zi_val) * self._f_mat0(zeta, z_val, sigma, diam)
-
-    def _f_mat2(self, zeta, z_val, zi_val, sigma, diam):
-        '''2nd-order potential function'''
-        return (zeta - zi_val)**2 * self._f_mat0(zeta, z_val, sigma, diam)
-
-    def _f_mat3(self, zeta, z_val, zi_val, sigma, diam):
-        '''3rd-order potential function'''
-        return (zeta - zi_val)**3 * self._f_mat0(zeta, z_val, sigma, diam)
-
-    def _calc_k_matrix(self):
-        '''Calculate the K-matrix used to calculate the E-matrices'''
-        el_len = self.coord_electrode.size
-        h = float(np.diff(self.coord_electrode).min())
-
-        c_jm1 = np.eye(el_len + 2, k=0) / h
-        c_jm1[0, 0] = 0
-
-        c_j0 = np.eye(el_len + 2) / h
-        c_j0[-1, -1] = 0
-
-        c_jall = c_j0
-        c_jall[0, 0] = 1
-        c_jall[-1, -1] = 1
-
-        tjp1 = np.eye(el_len + 2, k=1)
-        tjm1 = np.eye(el_len + 2, k=-1)
-
-        tj0 = np.eye(el_len + 2)
-        tj0[0, 0] = 0
-        tj0[-1, -1] = 0
-
-        # Defining K-matrix used to calculate e_mat1-3
-        return np.dot(np.linalg.inv(np.dot(c_jm1, tjm1) +
-                                    2 * np.dot(c_jm1, tj0) +
-                                    2 * c_jall +
-                                    np.dot(c_j0, tjp1)),
-                      3 * (np.dot(np.dot(c_jm1, c_jm1), tj0) -
-                           np.dot(np.dot(c_jm1, c_jm1), tjm1) +
-                           np.dot(np.dot(c_j0, c_j0), tjp1) -
-                           np.dot(np.dot(c_j0, c_j0), tj0)))
-
-    def _calc_e_matrices(self):
-        '''Calculate the E-matrices used by cubic spline iCSD method'''
-        el_len = self.coord_electrode.size
-        # expanding electrode grid
-        h = float(np.diff(self.coord_electrode).min())
-
-        # Define transformation matrices
-        c_mat3 = np.eye(el_len + 1) / h
-
-        # Get K-matrix
-        k_matrix = self._calc_k_matrix()
-
-        # Define matrices for C to A transformation:
-        tja = np.eye(el_len + 2)[:-1, ]
-        tjp1a = np.eye(el_len + 2, k=1)[:-1, ]
-
-        # Define spline coefficients
-        e_mat0 = tja
-        e_mat1 = np.dot(tja, k_matrix)
-        e_mat2 = 3 * np.dot(c_mat3**2, (tjp1a - tja)) - \
-                            np.dot(np.dot(c_mat3, (tjp1a + 2 * tja)), k_matrix)
-        e_mat3 = 2 * np.dot(c_mat3**3, (tja - tjp1a)) + \
-                            np.dot(np.dot(c_mat3**2, (tjp1a + tja)), k_matrix)
-
-        return e_mat0, e_mat1, e_mat2, e_mat3
-
-
-if __name__ == '__main__':
-    from scipy.io import loadmat
-    import matplotlib.pyplot as plt
-
-    
-    #loading test data
-    test_data = loadmat('test_data.mat')
-    
-    # prepare lfp data for use by changing the units to SI and appending
-    # quantities, along with electrode geometry, conductivities and assumed
-    # source geometry
-    lfp_data = test_data['pot1'] * 1E-6 * pq.V        # [uV] -> [V]
-    z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m  # [m]
-    diam = 500E-6 * pq.m                              # [m]
-    h = 100E-6 * pq.m                                 # [m]
-    sigma = 0.3 * pq.S / pq.m                         # [S/m] or [1/(ohm*m)]
-    sigma_top = 0.3 * pq.S / pq.m                     # [S/m] or [1/(ohm*m)]
-    
-    # Input dictionaries for each method
-    delta_input = {
-        'lfp' : lfp_data,
-        'coord_electrode' : z_data,
-        'diam' : diam,          # source diameter
-        'sigma' : sigma,        # extracellular conductivity
-        'sigma_top' : sigma,    # conductivity on top of cortex
-        'f_type' : 'gaussian',  # gaussian filter
-        'f_order' : (3, 1),     # 3-point filter, sigma = 1.
-    }
-    step_input = {
-        'lfp' : lfp_data,
-        'coord_electrode' : z_data,
-        'diam' : diam,
-        'h' : h,                # source thickness
-        'sigma' : sigma,
-        'sigma_top' : sigma,
-        'tol' : 1E-12,          # Tolerance in numerical integration
-        'f_type' : 'gaussian',
-        'f_order' : (3, 1),
-    }
-    spline_input = {
-        'lfp' : lfp_data,
-        'coord_electrode' : z_data,
-        'diam' : diam,
-        'sigma' : sigma,
-        'sigma_top' : sigma,
-        'num_steps' : 201,      # Spatial CSD upsampling to N steps
-        'tol' : 1E-12,
-        'f_type' : 'gaussian',
-        'f_order' : (20, 5),
-    }
-    std_input = {
-        'lfp' : lfp_data,
-        'coord_electrode' : z_data,
-        'sigma' : sigma,
-        'f_type' : 'gaussian',
-        'f_order' : (3, 1),
-    }
-    
-    
-    #Create the different CSD-method class instances. We use the class methods
-    #get_csd() and filter_csd() below to get the raw and spatially filtered
-    #versions of the current-source density estimates.
-    csd_dict = dict(
-        delta_icsd = DeltaiCSD(**delta_input),
-        step_icsd = StepiCSD(**step_input),
-        spline_icsd = SplineiCSD(**spline_input),
-        std_csd = StandardCSD(**std_input),
-    )
-    
-    #plot
-    for method, csd_obj in list(csd_dict.items()):
-        fig, axes = plt.subplots(3,1, figsize=(8,8))
-    
-        #plot LFP signal
-        ax = axes[0]
-        im = ax.imshow(np.array(lfp_data), origin='upper', vmin=-abs(lfp_data).max(), \
-                  vmax=abs(lfp_data).max(), cmap='jet_r', interpolation='nearest')
-        ax.axis(ax.axis('tight'))
-        cb = plt.colorbar(im, ax=ax)
-        cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
-        ax.set_xticklabels([])
-        ax.set_title('LFP')
-        ax.set_ylabel('ch #')
-    
-        #plot raw csd estimate
-        csd = csd_obj.get_csd()
-        ax = axes[1]
-        im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
-              vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
-        ax.axis(ax.axis('tight'))
-        ax.set_title(csd_obj.name)
-        cb = plt.colorbar(im, ax=ax)
-        cb.set_label('CSD (%s)' % csd.dimensionality.string)
-        ax.set_xticklabels([])
-        ax.set_ylabel('ch #')
-    
-        #plot spatially filtered csd estimate
-        ax = axes[2]
-        csd = csd_obj.filter_csd(csd)
-        im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
-              vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
-        ax.axis(ax.axis('tight'))
-        ax.set_title(csd_obj.name + ', filtered')
-        cb = plt.colorbar(im, ax=ax)
-        cb.set_label('CSD (%s)' % csd.dimensionality.string)
-        ax.set_ylabel('ch #')
-        ax.set_xlabel('timestep')
-    
-    
-    plt.show()
-
+/annex/objects/MD5-s35175--13d5e113e82873e1579cb3ae927fa634
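
Since test_data.mat stays annexed in this commit, a self-contained sketch of
the same delta-iCSD flow on synthetic data (shapes and defaults follow the
deleted code above; assumes a scipy version that still ships
scipy.signal.gaussian for the spatial filter):

    import numpy as np
    import quantities as pq
    from elephant.current_source_density_src.icsd import DeltaiCSD

    z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m  # 23 contact depths
    lfp = np.random.randn(23, 100) * 1E-6 * pq.V      # fake (# ch, # samples)
    delta_icsd = DeltaiCSD(lfp=lfp, coord_electrode=z_data,
                           diam=500E-6 * pq.m)
    csd = delta_icsd.get_csd()            # raw estimate, shape (23, 100)
    csd = delta_icsd.filter_csd(csd)      # spatially filtered version
    print(csd.shape, csd.dimensionality)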

BIN
code/elephant/elephant/current_source_density_src/test_data.mat


+ 1 - 364
code/elephant/elephant/current_source_density_src/utility_functions.py

@@ -1,364 +1 @@
-# -*- coding: utf-8 -*-
-"""
-These are some useful functions used in CSD methods.
-They include CSD source profiles to be used as ground truths,
-placement of electrodes in 1D, 2D and 3D, etc.
-These scripts are based on Grzegorz Parka's
-Google Summer of Code 2014 project, INCF/pykCSD.
-Written by:
-Michal Czerwinski, Chaitanya Chintaluri
-Laboratory of Neuroinformatics,
-Nencki Institute of Experimental Biology, Warsaw.
-"""
-from __future__ import division
-
-import numpy as np
-from numpy import exp
-import quantities as pq
-
-
-def patch_quantities():
-    """patch quantities with the SI unit Siemens if it does not exist"""
-    for symbol, prefix, definition, u_symbol in zip(
-        ['siemens', 'S', 'mS', 'uS', 'nS', 'pS'],
-        ['', '', 'milli', 'micro', 'nano', 'pico'],
-        [pq.A / pq.V, pq.A / pq.V, 'S', 'mS', 'uS', 'nS'],
-        [None, None, None, None, u'µS', None]):
-        if type(definition) is str:
-            definition = lastdefinition / 1000
-        if not hasattr(pq, symbol):
-            setattr(pq, symbol, pq.UnitQuantity(
-                prefix + 'siemens',
-                definition,
-                symbol=symbol,
-                u_symbol=u_symbol))
-        lastdefinition = definition
-    return
-
-
-def contains_duplicated_electrodes(elec_pos):
-    """Checks for duplicate electrodes
-    Parameters
-    ----------
-    elec_pos : np.array
-
-    Returns
-    -------
-    has_duplicated_elec : Boolean
-    """
-    unique_elec_pos = set(map(tuple, elec_pos))
-    has_duplicated_elec = len(unique_elec_pos) < len(elec_pos)
-    return has_duplicated_elec
-
-
-def distribute_srcs_1D(X, n_src, ext_x, R_init):
-    """Distribute sources in 1D equally spaced
-    Parameters
-    ----------
-    X : np.arrays
-        points at which CSD will be estimated
-    n_src : int
-        number of sources to be included in the model
-    ext_x : floats
-        how far the sources should extend beyond the area X
-    R_init : float
-        Same as R in 1D case
-    Returns
-    -------
-    X_src : np.arrays
-        positions of the sources
-    R : float
-        effective radius of the basis element
-    """
-    X_src = np.mgrid[(np.min(X) - ext_x):(np.max(X) + ext_x):
-                     complex(0, n_src)]
-    R = R_init
-    return X_src, R
-
-
-def distribute_srcs_2D(X, Y, n_src, ext_x, ext_y, R_init):
-    """Distribute n_src's in the given area evenly
-    Parameters
-    ----------
-    X, Y : np.arrays
-        points at which CSD will be estimated
-    n_src : int
-        demanded number of sources to be included in the model
-    ext_x, ext_y : floats
-        how far the sources should extend beyond the area X, Y
-    R_init : float
-        demanded radius of the basis element
-    Returns
-    -------
-    X_src, Y_src : np.arrays
-        positions of the sources
-    nx, ny : ints
-        number of sources in directions x,y
-        new n_src = nx * ny may not be equal to the demanded number of sources
-    R : float
-        effective radius of the basis element
-    """
-    Lx = np.max(X) - np.min(X)
-    Ly = np.max(Y) - np.min(Y)
-    Lx_n = Lx + (2 * ext_x)
-    Ly_n = Ly + (2 * ext_y)
-    [nx, ny, Lx_nn, Ly_nn, ds] = get_src_params_2D(Lx_n, Ly_n, n_src)
-    ext_x_n = (Lx_nn - Lx) / 2
-    ext_y_n = (Ly_nn - Ly) / 2
-    X_src, Y_src = np.mgrid[(np.min(X) - ext_x_n):(np.max(X) + ext_x_n):
-                            complex(0, nx),
-                            (np.min(Y) - ext_y_n):(np.max(Y) + ext_y_n):
-                            complex(0, ny)]
-    # d = round(R_init / ds)
-    R = R_init  # R = d * ds
-    return X_src, Y_src, R
-
-
-def get_src_params_2D(Lx, Ly, n_src):
-    """Distribute n_src sources evenly in a rectangle of size Lx * Ly
-    Parameters
-    ----------
-    Lx, Ly : floats
-        lengths in the directions x, y of the area,
-        the sources should be placed
-    n_src : int
-        demanded number of sources
-
-    Returns
-    -------
-    nx, ny : ints
-        number of sources in directions x, y
-        new n_src = nx * ny may not be equal to the demanded number of sources
-    Lx_n, Ly_n : floats
-        updated lengths in the directions x, y
-    ds : float
-        spacing between the sources
-    """
-    coeff = [Ly, Lx - Ly, -Lx * n_src]
-    rts = np.roots(coeff)
-    r = [r for r in rts if type(r) is not complex and r > 0]
-    nx = r[0]
-    ny = n_src / nx
-    ds = Lx / (nx - 1)
-    nx = np.floor(nx) + 1
-    ny = np.floor(ny) + 1
-    Lx_n = (nx - 1) * ds
-    Ly_n = (ny - 1) * ds
-    return (nx, ny, Lx_n, Ly_n, ds)
-
-
-def distribute_srcs_3D(X, Y, Z, n_src, ext_x, ext_y, ext_z, R_init):
-    """Distribute n_src sources evenly in a rectangle of size Lx * Ly * Lz
-    Parameters
-    ----------
-    X, Y, Z : np.arrays
-        points at which CSD will be estimated
-    n_src : int
-        desired number of sources we want to include in the model
-    ext_x, ext_y, ext_z : floats
-        how far the sources should extend over the area X, Y, Z
-    R_init : float
-        demanded radius of the basis element
-
-    Returns
-    -------
-    X_src, Y_src, Z_src : np.arrays
-        positions of the sources in 3D space
-    nx, ny, nz : ints
-        number of sources in directions x,y,z
-        new n_src = nx * ny * nz may not be equal to the demanded number of
-        sources
-
-    R : float
-        updated radius of the basis element
-    """
-    Lx = np.max(X) - np.min(X)
-    Ly = np.max(Y) - np.min(Y)
-    Lz = np.max(Z) - np.min(Z)
-    Lx_n = Lx + 2 * ext_x
-    Ly_n = Ly + 2 * ext_y
-    Lz_n = Lz + 2 * ext_z
-    (nx, ny, nz, Lx_nn, Ly_nn, Lz_nn, ds) = get_src_params_3D(Lx_n,
-                                                              Ly_n,
-                                                              Lz_n,
-                                                              n_src)
-    ext_x_n = (Lx_nn - Lx) / 2
-    ext_y_n = (Ly_nn - Ly) / 2
-    ext_z_n = (Lz_nn - Lz) / 2
-    X_src, Y_src, Z_src = np.mgrid[(np.min(X) - ext_x_n):(np.max(X) + ext_x_n):
-                                   complex(0, nx),
-                                   (np.min(Y) - ext_y_n):(np.max(Y) + ext_y_n):
-                                   complex(0, ny),
-                                   (np.min(Z) - ext_z_n):(np.max(Z) + ext_z_n):
-                                   complex(0, nz)]
-    # d = np.round(R_init / ds)
-    R = R_init
-    return (X_src, Y_src, Z_src, R)
-
-
-def get_src_params_3D(Lx, Ly, Lz, n_src):
-    """Helps to evenly distribute n_src sources in a cuboid of size Lx * Ly * Lz
-    Parameters
-    ----------
-    Lx, Ly, Lz : floats
-        lengths in the directions x, y, z of the area,
-        the sources should be placed
-    n_src : int
-        demanded number of sources to be included in the model
-    Returns
-    -------
-    nx, ny, nz : ints
-        number of sources in directions x, y, z
-        new n_src = nx * ny * nz may not be equal to the demanded number of
-        sources
-    Lx_n, Ly_n, Lz_n : floats
-        updated lengths in the directions x, y, z
-    ds : float
-        spacing between the sources (grid nodes)
-    """
-    V = Lx * Ly * Lz
-    V_unit = V / n_src
-    L_unit = V_unit**(1. / 3.)
-    nx = np.ceil(Lx / L_unit)
-    ny = np.ceil(Ly / L_unit)
-    nz = np.ceil(Lz / L_unit)
-    ds = Lx / (nx - 1)
-    Lx_n = (nx - 1) * ds
-    Ly_n = (ny - 1) * ds
-    Lz_n = (nz - 1) * ds
-    return (nx, ny, nz, Lx_n, Ly_n, Lz_n, ds)
-
-
-def generate_electrodes(dim, xlims=[0.1, 0.9], ylims=[0.1, 0.9],
-                        zlims=[0.1, 0.9], res=5):
-    """Generates electrodes, helpful for FWD funtion.
-        Parameters
-        ----------
-        dim : int
-            Dimensionality of the electrodes, 1,2 or 3
-        xlims : [start, end]
-            Spatial limits of the electrodes
-        ylims : [start, end]
-            Spatial limits of the electrodes
-        zlims : [start, end]
-            Spatial limits of the electrodes
-        res : int
-            How many electrodes in each dimension
-        Returns
-        -------
-        ele_x, ele_y, ele_z : flattened np.array of the electrode pos
-
-    """
-    if dim == 1:
-        ele_x = np.mgrid[xlims[0]: xlims[1]: complex(0, res)]
-        ele_x = ele_x.flatten()
-        return ele_x
-    elif dim == 2:
-        ele_x, ele_y = np.mgrid[xlims[0]: xlims[1]: complex(0, res),
-                                ylims[0]: ylims[1]: complex(0, res)]
-        ele_x = ele_x.flatten()
-        ele_y = ele_y.flatten()
-        return ele_x, ele_y
-    elif dim == 3:
-        ele_x, ele_y, ele_z = np.mgrid[xlims[0]: xlims[1]: complex(0, res),
-                                       ylims[0]: ylims[1]: complex(0, res),
-                                       zlims[0]: zlims[1]: complex(0, res)]
-        ele_x = ele_x.flatten()
-        ele_y = ele_y.flatten()
-        ele_z = ele_z.flatten()
-        return ele_x, ele_y, ele_z
-
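A usage sketch for the helper above, building a planar 5 x 5 grid (assuming generate_electrodes is imported from this module):

    ele_x, ele_y = generate_electrodes(dim=2, xlims=[0.1, 0.9],
                                       ylims=[0.1, 0.9], res=5)
    print(ele_x.shape)  # (25,) -- flattened 5 x 5 grid of electrode positions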
-
-def gauss_1d_dipole(x):
-    """1D Gaussian dipole source is placed between 0 and 1
-       to be used to test the CSD
-
-       Parameters
-       ----------
-       x : np.array
-           Spatial pts. at which the true csd is evaluated
-
-       Returns
-       -------
-       f : np.array
-           The value of the csd at the requested points
-    """
-    src = 0.5*np.exp(-((x-0.7)**2)/(2.*0.3))*(2*np.pi*0.3)**-0.5
-    snk = -0.5*np.exp(-((x-0.3)**2)/(2.*0.3))*(2*np.pi*0.3)**-0.5
-    f = src+snk
-    return f
-
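Evaluating the dipole on a grid confirms that the source and sink cancel; a minimal sketch, assuming gauss_1d_dipole is in scope:

    import numpy as np

    x = np.linspace(0., 1., 101)
    csd = gauss_1d_dipole(x)
    print(np.trapz(csd, x))  # approximately 0 -- source and sink balance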
-def large_source_2D(x, y):
-    """2D Gaussian large source profile - to use to test csd
-       Parameters
-       ----------
-       x : np.array
-           Spatial x pts. at which the true csd is evaluated
-       y : np.array
-           Spatial y pts. at which the true csd is evaluated
-       Returns
-       -------
-       f : np.array
-           The value of the csd at the requested points
-    """
-    zz = [0.4, -0.3, -0.1, 0.6]
-    zs = [0.2, 0.3, 0.4, 0.2]
-    f1 = 0.5965*np.exp((-1*(x-0.1350)**2 - (y-0.8628)**2)/0.4464) * np.exp(-(-zz[0])**2/zs[0]) / np.exp(-(zz[0])**2/zs[0])
-    f2 = -0.9269*np.exp((-2*(x-0.1848)**2 - (y-0.0897)**2)/0.2046) * np.exp(-(-zz[1])**2/zs[1]) / np.exp(-(zz[1])**2/zs[1])
-    f3 = 0.5910*np.exp((-3*(x-1.3189)**2 - (y-0.3522)**2)/0.2129) * np.exp(-(-zz[2])**2/zs[2]) / np.exp(-(zz[2])**2/zs[2])
-    f4 = -0.1963*np.exp((-4*(x-1.3386)**2 - (y-0.5297)**2)/0.2507) * np.exp(-(-zz[3])**2/zs[3]) / np.exp(-(zz[3])**2/zs[3])
-    f = f1+f2+f3+f4
-    return f
-
-def small_source_2D(x, y):
-    """2D Gaussian small source profile - to be used to test csd
-       Parameters
-       ----------
-       x : np.array
-           Spatial x pts. at which the true csd is evaluated
-       y : np.array
-           Spatial y pts. at which the true csd is evaluated
-       Returns
-       -------
-       f : np.array
-           The value of the csd at the requested points
-    """
-    def gauss2d(x,y,p):
-        rcen_x = p[0] * np.cos(p[5]) - p[1] * np.sin(p[5])
-        rcen_y = p[0] * np.sin(p[5]) + p[1] * np.cos(p[5])
-        xp = x * np.cos(p[5]) - y * np.sin(p[5])
-        yp = x * np.sin(p[5]) + y * np.cos(p[5])
-
-        g = p[4]*np.exp(-(((rcen_x-xp)/p[2])**2 +
-                          ((rcen_y-yp)/p[3])**2)/2.)
-        return g
-    f1 = gauss2d(x,y,[0.3,0.7,0.038,0.058,0.5,0.])
-    f2 = gauss2d(x,y,[0.3,0.6,0.038,0.058,-0.5,0.])
-    f3 = gauss2d(x,y,[0.45,0.7,0.038,0.058,0.5,0.])
-    f4 = gauss2d(x,y,[0.45,0.6,0.038,0.058,-0.5,0.])
-    f = f1+f2+f3+f4
-    return f
-
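Both 2D test profiles are evaluated the same way, on a mesh grid covering the region of interest; a sketch assuming small_source_2D is in scope:

    import numpy as np

    x, y = np.mgrid[0.:1.:50j, 0.:1.:50j]
    csd = small_source_2D(x, y)
    print(csd.shape)  # (50, 50) ground-truth CSD for benchmarking estimators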
-def gauss_3d_dipole(x, y, z):
-    """3D Gaussian dipole profile - to be used to test csd.
-       Parameters
-       ----------
-       x : np.array
-           Spatial x pts. at which the true csd is evaluated
-       y : np.array
-           Spatial y pts. at which the true csd is evaluated
-       z : np.array
-           Spatial z pts. at which the true csd is evaluated
-       Returns
-       -------
-       f : np.array
-           The value of the csd at the requested points
-    """
-    x0, y0, z0 = 0.3, 0.7, 0.3
-    x1, y1, z1 = 0.6, 0.5, 0.7
-    sig_2 = 0.023
-    A = (2*np.pi*sig_2)**-1
-    f1 = A*np.exp((-(x-x0)**2 - (y-y0)**2 - (z-z0)**2) / (2*sig_2))
-    f2 = -A*np.exp((-(x-x1)**2 - (y-y1)**2 - (z-z1)**2) / (2*sig_2))
-    f = f1+f2
-    return f
+/annex/objects/MD5-s11986--94ce0292b45fc44a83cbb3b0b7543cc4

+ 1 - 0
code/elephant/elephant/gpfa/__init__.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s153--8ff76fe9e21629f576fb004117f6c6ff

+ 1 - 0
code/elephant/elephant/gpfa/gpfa.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s19498--0cee6d29fe905426d7a13f46670d73cd

+ 1 - 0
code/elephant/elephant/gpfa/gpfa_core.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s20892--8a650e60f9932330c60821dfe16ac1cf

+ 1 - 0
code/elephant/elephant/gpfa/gpfa_util.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s17915--dd5fbf7888cd08cb3ecfdcdadbee861c

+ 1 - 913
code/elephant/elephant/kernels.py

@@ -1,913 +1 @@
-# -*- coding: utf-8 -*-
-"""
-Definition of a hierarchy of classes for kernel functions to be used
-in convolution, e.g., for data smoothing (low pass filtering) or
-firing rate estimation.
-
-
-Symmetric kernels
-~~~~~~~~~~~~~~~~~
-
-.. autosummary::
-    :toctree: toctree/kernels/
-
-    RectangularKernel
-    TriangularKernel
-    EpanechnikovLikeKernel
-    GaussianKernel
-    LaplacianKernel
-
-Asymmetric kernels
-~~~~~~~~~~~~~~~~~~
-
-.. autosummary::
-    :toctree: toctree/kernels/
-
-    ExponentialKernel
-    AlphaKernel
-
-
-Examples
---------
->>> import quantities as pq
->>> kernel1 = GaussianKernel(sigma=100*pq.ms)
->>> kernel2 = ExponentialKernel(sigma=8*pq.ms, invert=True)
-
-:copyright: Copyright 2016 by the Elephant team, see `doc/authors.rst`.
-:license: Modified BSD, see LICENSE.txt for details.
-"""
-
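A typical use of these kernels is smoothing a spike train into a firing-rate estimate; a hedged sketch using elephant.statistics.instantaneous_rate (signature as in recent Elephant releases):

    import quantities as pq
    from neo import SpikeTrain
    from elephant import kernels
    from elephant.statistics import instantaneous_rate

    spiketrain = SpikeTrain([0.1, 0.4, 0.5, 0.9] * pq.s, t_stop=1 * pq.s)
    kernel = kernels.GaussianKernel(sigma=100 * pq.ms)
    rate = instantaneous_rate(spiketrain, sampling_period=10 * pq.ms,
                              kernel=kernel)
    print(rate.shape)  # one smoothed rate sample per 10 ms step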
-from __future__ import division, print_function, unicode_literals
-
-import math
-
-import numpy as np
-import quantities as pq
-import scipy.optimize
-import scipy.special
-import scipy.stats
-
-from elephant.utils import deprecated_alias
-
-__all__ = [
-    'RectangularKernel', 'TriangularKernel', 'EpanechnikovLikeKernel',
-    'GaussianKernel', 'LaplacianKernel', 'ExponentialKernel', 'AlphaKernel'
-]
-
-
-class Kernel(object):
-    r"""
-    This is the base class for commonly used kernels.
-
-    **General definition of a kernel:**
-
-    A function :math:`K(x, y)` is called a kernel function if
-    :math:`\int{K(x, y) g(x) g(y) \textrm{d}x \textrm{d}y} \ \geq 0 \quad
-    \forall g \in L_2`
-
-
-    **Currently implemented kernels are:**
-
-        * rectangular
-        * triangular
-        * epanechnikovlike
-        * gaussian
-        * laplacian
-        * exponential (asymmetric)
-        * alpha function (asymmetric)
-
-    In neuroscience, a popular application of kernels is in performing
-    smoothing operations via convolution. In this case, the kernel has the
-    properties of a probability density, i.e., it is positive and normalized
-    to one. Popular choices are the rectangular or Gaussian kernels.
-
-    Exponential and alpha kernels may also be used to represent the
-    postsynaptic current/potentials in a linear (current-based) model.
-
-    Parameters
-    ----------
-    sigma : pq.Quantity
-        Standard deviation of the kernel.
-    invert : bool, optional
-        If True, asymmetric kernels (e.g., exponential or alpha kernels) are
-        inverted along the time axis.
-        Default: False.
-
-    Raises
-    ------
-    TypeError
-        If `sigma` is not `pq.Quantity`.
-
-    ValueError
-        If `sigma` is negative.
-
-        If `invert` is not `bool`.
-
-    """
-
-    def __init__(self, sigma, invert=False):
-        if not isinstance(sigma, pq.Quantity):
-            raise TypeError("'sigma' must be a quantity")
-
-        if sigma.magnitude < 0:
-            raise ValueError("'sigma' cannot be negative")
-
-        if not isinstance(invert, bool):
-            raise ValueError("'invert' must be bool")
-
-        self.sigma = sigma
-        self.invert = invert
-
-    def __repr__(self):
-        return "{cls}(sigma={sigma}, invert={invert})".format(
-            cls=self.__class__.__name__, sigma=self.sigma, invert=self.invert)
-
-    @deprecated_alias(t='times')
-    def __call__(self, times):
-        """
-        Evaluates the kernel at all points in the array `times`.
-
-        Parameters
-        ----------
-        times : pq.Quantity
-            A vector with time intervals on which the kernel is evaluated.
-
-        Returns
-        -------
-        pq.Quantity
-            Vector with the result of the kernel evaluations.
-
-        Raises
-        ------
-        TypeError
-            If `times` is not `pq.Quantity`.
-
-            If the dimensionality of `times` and :attr:`sigma` are different.
-
-        """
-        self._check_time_input(times)
-        return self._evaluate(times)
-
-    def _evaluate(self, times):
-        """
-        Evaluates the kernel Probability Density Function, PDF.
-
-        Parameters
-        ----------
-        times : pq.Quantity
-            Vector with the interval on which the kernel is evaluated, not
-            necessarily a time interval.
-
-        Returns
-        -------
-        pq.Quantity
-            Vector with the result of the kernel evaluation.
-
-        """
-        raise NotImplementedError(
-            "The Kernel class should not be used directly; "
-            "use one of the subclasses implementing a specific kernel.")
-
-    def boundary_enclosing_area_fraction(self, fraction):
-        """
-        Calculates the boundary :math:`b` so that the integral from
-        :math:`-b` to :math:`b` encloses a certain fraction of the
-        integral over the complete kernel.
-
-        By definition the returned value is hence non-negative, even if the
-        whole probability mass of the kernel is concentrated over negative
-        support for inverted kernels.
-
-        Parameters
-        ----------
-        fraction : float
-            Fraction of the whole area which has to be enclosed.
-
-        Returns
-        -------
-        pq.Quantity
-            Boundary of the kernel containing area `fraction` under the
-            kernel density.
-
-        Raises
-        ------
-        ValueError
-            If `fraction` was chosen too close to one, such that in
-            combination with integral approximation errors the calculation of
-            a boundary was not possible.
-
-        """
-        raise NotImplementedError(
-            "The Kernel class should not be used directly; "
-            "use one of the subclasses implementing a specific kernel.")
-
-    def _check_fraction(self, fraction):
-        """
-        Checks the input variable of the method
-        :attr:`boundary_enclosing_area_fraction` for validity of type and
-        value.
-
-        Parameters
-        ----------
-        fraction : float or int
-            Fraction of the area under the kernel function.
-
-        Raises
-        ------
-        TypeError
-            If `fraction` is neither a float nor an int.
-
-        ValueError
-            If `fraction` is outside the valid interval ([0, 1] for
-            rectangular and triangular kernels, [0, 1) otherwise).
-
-        """
-        if not isinstance(fraction, (float, int)):
-            raise TypeError("`fraction` must be float or integer")
-        if isinstance(self, (TriangularKernel, RectangularKernel)):
-            valid = 0 <= fraction <= 1
-            bracket = ']'
-        else:
-            valid = 0 <= fraction < 1
-            bracket = ')'
-        if not valid:
-            raise ValueError("`fraction` must be in the interval "
-                             "[0, 1{}".format(bracket))
-
-    def _check_time_input(self, t):
-        if not isinstance(t, pq.Quantity):
-            raise TypeError("The argument 't' of the kernel callable must be "
-                            "of type Quantity")
-
-        if t.dimensionality.simplified != self.sigma.dimensionality.simplified:
-            raise TypeError("The dimensionality of sigma and the input array "
-                            "to the callable kernel object must be the same. "
-                            "Otherwise a normalization to 1 of the kernel "
-                            "cannot be performed.")
-
-    @deprecated_alias(t='time')
-    def cdf(self, time):
-        r"""
-        Cumulative Distribution Function, CDF.
-
-        Parameters
-        ----------
-        time : pq.Quantity
-            The input time scalar.
-
-        Returns
-        -------
-        float
-            CDF at `time`.
-
-        """
-        raise NotImplementedError
-
-    def icdf(self, fraction):
-        r"""
-        Inverse Cumulative Distribution Function, ICDF, also known as a
-        quantile.
-
-        Parameters
-        ----------
-        fraction : float
-            The fraction of CDF to compute the quantile from.
-
-        Returns
-        -------
-        pq.Quantity
-            The time scalar `t` such that CDF(`t`) = `fraction`.
-
-        """
-        raise NotImplementedError
-
-    @deprecated_alias(t='times')
-    def median_index(self, times):
-        r"""
-        Estimates the index of the Median of the kernel.
-
-        We define the Median index :math:`i` of a kernel as:
-
-        .. math::
-            t_i = \text{ICDF}\left( \frac{\text{CDF}(t_0) +
-            \text{CDF}(t_{N-1})}{2} \right)
-
-        where :math:`t_0` and :math:`t_{N-1}` are the first and last entries of
-        the input array, CDF and ICDF stand for Cumulative Distribution
-        Function and its Inverse, respectively.
-
-        This function is not mandatory for symmetrical kernels but it is
-        required when asymmetrical kernels have to be aligned at their median.
-
-        Parameters
-        ----------
-        times : pq.Quantity
-            Vector with the interval on which the kernel is evaluated.
-
-        Returns
-        -------
-        int
-            Index of the estimated value of the kernel median.
-
-        Raises
-        ------
-        TypeError
-            If the input array is not a time pq.Quantity array.
-
-        ValueError
-            If the input array is empty.
-            If the input array is not sorted.
-
-        See Also
-        --------
-        Kernel.cdf : cumulative distribution function
-        Kernel.icdf : inverse cumulative distribution function
-
-        """
-        self._check_time_input(times)
-        if len(times) == 0:
-            raise ValueError("The input time array is empty.")
-        if len(times) <= 2:
-            # either left or right; choose left
-            return 0
-        is_sorted = (np.diff(times.magnitude) >= 0).all()
-        if not is_sorted:
-            raise ValueError("The input time array must be sorted (in "
-                             "ascending order).")
-        cdf_mean = 0.5 * (self.cdf(times[0]) + self.cdf(times[-1]))
-        if cdf_mean == 0.:
-            # any index of the kernel non-support is valid; choose median
-            return len(times) // 2
-        icdf = self.icdf(fraction=cdf_mean)
-        icdf = icdf.rescale(times.units).magnitude
-        # icdf is guaranteed to be in (t_start, t_end) interval
-        median_index = np.nonzero(times.magnitude >= icdf)[0][0]
-        return median_index
-
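A numeric check of the median-index definition above, using the asymmetric exponential kernel defined later in this module (sketch):

    import numpy as np
    import quantities as pq

    kernel = ExponentialKernel(sigma=1 * pq.s)
    times = np.linspace(-1, 4, num=501) * pq.s
    idx = kernel.median_index(times)
    print(times[idx])  # ~0.68 s, near ln(2) s, the median of the exponential PDF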
-    def is_symmetric(self):
-        r"""
-        True for symmetric kernels and False otherwise (asymmetric kernels).
-
-        A kernel is symmetric if its PDF is symmetric w.r.t. time:
-
-        .. math::
-            \text{pdf}(-t) = \text{pdf}(t)
-
-        Returns
-        -------
-        bool
-            Whether the kernel is symmetric or not.
-        """
-        return isinstance(self, SymmetricKernel)
-
-    @property
-    def min_cutoff(self):
-        """
-        Half width of the kernel.
-
-        Returns
-        -------
-        float
-            The returned value varies according to the kernel type.
-        """
-        raise NotImplementedError
-
-
-class SymmetricKernel(Kernel):
-    """
-    Base class for symmetric kernels.
-    """
-
-
-class RectangularKernel(SymmetricKernel):
-    r"""
-    Class for rectangular kernels.
-
-    .. math::
-        K(t) = \left\{\begin{array}{ll} \frac{1}{2 \tau}, & |t| < \tau \\
-        0, & |t| \geq \tau \end{array} \right.
-
-    with :math:`\tau = \sqrt{3} \sigma` corresponding to the half width
-    of the kernel.
-
-    The parameter `invert` has no effect on symmetric kernels.
-
-    Examples
-    --------
-
-    .. plot::
-       :include-source:
-
-       from elephant import kernels
-       import quantities as pq
-       import numpy as np
-       import matplotlib.pyplot as plt
-
-       time_array = np.linspace(-3, 3, num=100) * pq.s
-       kernel = kernels.RectangularKernel(sigma=1*pq.s)
-       kernel_time = kernel(time_array)
-       plt.plot(time_array, kernel_time)
-       plt.title("RectangularKernel with sigma=1s")
-       plt.xlabel("time, s")
-       plt.ylabel("kernel, 1/s")
-       plt.show()
-
-    """
-
-    @property
-    def min_cutoff(self):
-        min_cutoff = np.sqrt(3.0)
-        return min_cutoff
-
-    def _evaluate(self, times):
-        t_units = times.units
-        t_abs = np.abs(times.magnitude)
-        tau = math.sqrt(3) * self.sigma.rescale(t_units).magnitude
-        kernel = (t_abs < tau) * 1 / (2 * tau)
-        kernel = pq.Quantity(kernel, units=1 / t_units)
-        return kernel
-
-    @deprecated_alias(t='time')
-    def cdf(self, time):
-        self._check_time_input(time)
-        tau = math.sqrt(3) * self.sigma.rescale(time.units).magnitude
-        time = np.clip(time.magnitude, a_min=-tau, a_max=tau)
-        cdf = (time + tau) / (2 * tau)
-        return cdf
-
-    def icdf(self, fraction):
-        self._check_fraction(fraction)
-        tau = math.sqrt(3) * self.sigma
-        icdf = tau * (2 * fraction - 1)
-        return icdf
-
-    def boundary_enclosing_area_fraction(self, fraction):
-        self._check_fraction(fraction)
-        return np.sqrt(3.0) * self.sigma * fraction
-
-
-class TriangularKernel(SymmetricKernel):
-    r"""
-    Class for triangular kernels.
-
-    .. math::
-        K(t) = \left\{ \begin{array}{ll} \frac{1}{\tau} (1
-        - \frac{|t|}{\tau}), & |t| < \tau \\
-         0, & |t| \geq \tau \end{array} \right.
-
-    with :math:`\tau = \sqrt{6} \sigma` corresponding to the half width of
-    the kernel.
-
-    The parameter `invert` has no effect on symmetric kernels.
-
-    Examples
-    --------
-
-    .. plot::
-       :include-source:
-
-       from elephant import kernels
-       import quantities as pq
-       import numpy as np
-       import matplotlib.pyplot as plt
-
-       time_array = np.linspace(-3, 3, num=1000) * pq.s
-       kernel = kernels.TriangularKernel(sigma=1*pq.s)
-       kernel_time = kernel(time_array)
-       plt.plot(time_array, kernel_time)
-       plt.title("TriangularKernel with sigma=1s")
-       plt.xlabel("time, s")
-       plt.ylabel("kernel, 1/s")
-       plt.show()
-
-    """
-
-    @property
-    def min_cutoff(self):
-        min_cutoff = np.sqrt(6.0)
-        return min_cutoff
-
-    def _evaluate(self, times):
-        tau = math.sqrt(6) * self.sigma.rescale(times.units).magnitude
-        kernel = scipy.stats.triang.pdf(times.magnitude, c=0.5, loc=-tau,
-                                        scale=2 * tau)
-        kernel = pq.Quantity(kernel, units=1 / times.units)
-        return kernel
-
-    @deprecated_alias(t='time')
-    def cdf(self, time):
-        self._check_time_input(time)
-        tau = math.sqrt(6) * self.sigma.rescale(time.units).magnitude
-        cdf = scipy.stats.triang.cdf(time.magnitude, c=0.5, loc=-tau,
-                                     scale=2 * tau)
-        return cdf
-
-    def icdf(self, fraction):
-        self._check_fraction(fraction)
-        tau = math.sqrt(6) * self.sigma.magnitude
-        icdf = scipy.stats.triang.ppf(fraction, c=0.5, loc=-tau, scale=2 * tau)
-        return icdf * self.sigma.units
-
-    def boundary_enclosing_area_fraction(self, fraction):
-        self._check_fraction(fraction)
-        return np.sqrt(6.0) * self.sigma * (1 - np.sqrt(1 - fraction))
-
-
-class EpanechnikovLikeKernel(SymmetricKernel):
-    r"""
-    Class for Epanechnikov-like kernels.
-
-    .. math::
-        K(t) = \left\{\begin{array}{ll} (3 /(4 d)) (1 - (t / d)^2),
-        & |t| < d \\
-        0, & |t| \geq d \end{array} \right.
-
-    with :math:`d = \sqrt{5} \sigma` being the half width of the kernel.
-
-    The Epanechnikov kernel under full consideration of its axioms has a half
-    width of :math:`\sqrt{5}`. Ignoring one axiom, the corresponding kernel
-    with half width 1 can also be called an Epanechnikov kernel [1]_.
-    However, kernels of this type with arbitrary half width are here referred
-    to as 'Epanechnikov-like' kernels.
-
-    The parameter `invert` has no effect on symmetric kernels.
-
-    References
-    ----------
-    .. [1] https://de.wikipedia.org/wiki/Epanechnikov-Kern
-
-    Examples
-    --------
-
-    .. plot::
-       :include-source:
-
-       from elephant import kernels
-       import quantities as pq
-       import numpy as np
-       import matplotlib.pyplot as plt
-
-       time_array = np.linspace(-3, 3, num=100) * pq.s
-       kernel = kernels.EpanechnikovLikeKernel(sigma=1*pq.s)
-       kernel_time = kernel(time_array)
-       plt.plot(time_array, kernel_time)
-       plt.title("EpanechnikovLikeKernel with sigma=1s")
-       plt.xlabel("time, s")
-       plt.ylabel("kernel, 1/s")
-       plt.show()
-
-    """
-
-    @property
-    def min_cutoff(self):
-        min_cutoff = np.sqrt(5.0)
-        return min_cutoff
-
-    def _evaluate(self, times):
-        tau = math.sqrt(5) * self.sigma.rescale(times.units).magnitude
-        t_div_tau = np.clip(times.magnitude / tau, a_min=-1, a_max=1)
-        kernel = 3. / (4. * tau) * np.maximum(0., 1 - t_div_tau ** 2)
-        kernel = pq.Quantity(kernel, units=1 / times.units)
-        return kernel
-
-    @deprecated_alias(t='time')
-    def cdf(self, time):
-        self._check_time_input(time)
-        tau = math.sqrt(5) * self.sigma.rescale(time.units).magnitude
-        t_div_tau = np.clip(time.magnitude / tau, a_min=-1, a_max=1)
-        cdf = 3. / 4 * (t_div_tau - t_div_tau ** 3 / 3.) + 0.5
-        return cdf
-
-    def icdf(self, fraction):
-        self._check_fraction(fraction)
-        # CDF(t) = -1/4 t^3 + 3/4 t + 1/2
-        coefs = [-1. / 4, 0, 3. / 4, 0.5 - fraction]
-        roots = np.roots(coefs)
-        icdf = next(root for root in roots if -1 <= root <= 1)
-        tau = math.sqrt(5) * self.sigma
-        return icdf * tau
-
-    def boundary_enclosing_area_fraction(self, fraction):
-        r"""
-        Refer to :func:`Kernel.boundary_enclosing_area_fraction` for the
-        documentation.
-
-        Notes
-        -----
-        For Epanechnikov-like kernels, integration of its density within
-        the boundaries 0 and :math:`b`, and then solving for :math:`b` leads
-        to the problem of finding the roots of a polynomial of third order.
-        The implemented formulas are based on the solution of this problem
-        given in [1]_, where the following 3 solutions are given:
-
-        * :math:`u_1 = 1`, solution on negative side;
-        * :math:`u_2 = \frac{-1 + i\sqrt{3}}{2}`, solution for larger
-          values than zero crossing of the density;
-        * :math:`u_3 = \frac{-1 - i\sqrt{3}}{2}`, solution for smaller
-          values than zero crossing of the density.
-
-        The solution :math:`u_3` is the relevant one for the problem at hand,
-        since it involves only positive area contributions.
-
-        References
-        ----------
-        .. [1] https://en.wikipedia.org/wiki/Cubic_function
-
-        """
-        self._check_fraction(fraction)
-        # Python's complex-operator cannot handle quantities, hence the
-        # following construction on quantities is necessary:
-        Delta_0 = complex(1.0 / (5.0 * self.sigma.magnitude ** 2), 0) / \
-            self.sigma.units ** 2
-        Delta_1 = complex(2.0 * np.sqrt(5.0) * fraction /
-                          (25.0 * self.sigma.magnitude ** 3), 0) / \
-            self.sigma.units ** 3
-        C = ((Delta_1 + (Delta_1 ** 2.0 - 4.0 * Delta_0 ** 3.0) ** (
-            1.0 / 2.0)) /
-            2.0) ** (1.0 / 3.0)
-        u_3 = complex(-1.0 / 2.0, -np.sqrt(3.0) / 2.0)
-        b = -5.0 * self.sigma ** 2 * (u_3 * C + Delta_0 / (u_3 * C))
-        return b.real
-
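The closed-form boundary can be cross-checked by numerically integrating the kernel density over [-b, b]; a sketch assuming EpanechnikovLikeKernel is importable:

    import numpy as np
    import quantities as pq

    kernel = EpanechnikovLikeKernel(sigma=1 * pq.s)
    b = kernel.boundary_enclosing_area_fraction(0.5)
    t = np.linspace(-b.magnitude, b.magnitude, 10001) * pq.s
    print(np.trapz(kernel(t).magnitude, t.magnitude))  # approximately 0.5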
-
-class GaussianKernel(SymmetricKernel):
-    r"""
-    Class for Gaussian kernels.
-
-    .. math::
-        K(t) = (\frac{1}{\sigma \sqrt{2 \pi}}) \exp(-\frac{t^2}{2 \sigma^2})
-
-    with :math:`\sigma` being the standard deviation.
-
-    The parameter `invert` has no effect on symmetric kernels.
-
-    Examples
-    --------
-
-    .. plot::
-       :include-source:
-
-       from elephant import kernels
-       import quantities as pq
-       import numpy as np
-       import matplotlib.pyplot as plt
-
-       time_array = np.linspace(-3, 3, num=100) * pq.s
-       kernel = kernels.GaussianKernel(sigma=1*pq.s)
-       kernel_time = kernel(time_array)
-       plt.plot(time_array, kernel_time)
-       plt.title("GaussianKernel with sigma=1s")
-       plt.xlabel("time, s")
-       plt.ylabel("kernel, 1/s")
-       plt.show()
-
-    """
-
-    @property
-    def min_cutoff(self):
-        min_cutoff = 3.0
-        return min_cutoff
-
-    def _evaluate(self, times):
-        sigma = self.sigma.rescale(times.units).magnitude
-        kernel = scipy.stats.norm.pdf(times.magnitude, loc=0, scale=sigma)
-        kernel = pq.Quantity(kernel, units=1 / times.units)
-        return kernel
-
-    @deprecated_alias(t='time')
-    def cdf(self, time):
-        self._check_time_input(time)
-        sigma = self.sigma.rescale(time.units).magnitude
-        cdf = scipy.stats.norm.cdf(time.magnitude, loc=0, scale=sigma)
-        return cdf
-
-    def icdf(self, fraction):
-        self._check_fraction(fraction)
-        icdf = scipy.stats.norm.ppf(fraction, loc=0,
-                                    scale=self.sigma.magnitude)
-        return icdf * self.sigma.units
-
-    def boundary_enclosing_area_fraction(self, fraction):
-        self._check_fraction(fraction)
-        return self.sigma * np.sqrt(2.0) * scipy.special.erfinv(fraction)
-
-
-class LaplacianKernel(SymmetricKernel):
-    r"""
-    Class for Laplacian kernels.
-
-    .. math::
-        K(t) = \frac{1}{2 \tau} \exp\left(-\left|\frac{t}{\tau}\right|\right)
-
-    with :math:`\tau = \sigma / \sqrt{2}`.
-
-    The parameter `invert` has no effect on symmetric kernels.
-
-    Examples
-    --------
-
-    .. plot::
-       :include-source:
-
-       from elephant import kernels
-       import quantities as pq
-       import numpy as np
-       import matplotlib.pyplot as plt
-
-       time_array = np.linspace(-3, 3, num=1000) * pq.s
-       kernel = kernels.LaplacianKernel(sigma=1*pq.s)
-       kernel_time = kernel(time_array)
-       plt.plot(time_array, kernel_time)
-       plt.title("LaplacianKernel with sigma=1s")
-       plt.xlabel("time, s")
-       plt.ylabel("kernel, 1/s")
-       plt.show()
-
-    """
-
-    @property
-    def min_cutoff(self):
-        min_cutoff = 3.0
-        return min_cutoff
-
-    def _evaluate(self, times):
-        tau = self.sigma.rescale(times.units).magnitude / math.sqrt(2)
-        kernel = scipy.stats.laplace.pdf(times.magnitude, loc=0, scale=tau)
-        kernel = pq.Quantity(kernel, units=1 / times.units)
-        return kernel
-
-    @deprecated_alias(t='time')
-    def cdf(self, time):
-        self._check_time_input(time)
-        tau = self.sigma.rescale(time.units).magnitude / math.sqrt(2)
-        cdf = scipy.stats.laplace.cdf(time.magnitude, loc=0, scale=tau)
-        return cdf
-
-    def icdf(self, fraction):
-        self._check_fraction(fraction)
-        tau = self.sigma.magnitude / math.sqrt(2)
-        icdf = scipy.stats.laplace.ppf(fraction, loc=0, scale=tau)
-        return icdf * self.sigma.units
-
-    def boundary_enclosing_area_fraction(self, fraction):
-        self._check_fraction(fraction)
-        return -self.sigma * np.log(1.0 - fraction) / np.sqrt(2.0)
-
-
-# Potential further symmetric kernels from Wiki Kernels (statistics):
-# Quartic (biweight), Triweight, Tricube, Cosine, Logistic, Silverman
-
-
-class ExponentialKernel(Kernel):
-    r"""
-    Class for exponential kernels.
-
-    .. math::
-        K(t) = \left\{\begin{array}{ll} (1 / \tau) \exp{(-t / \tau)},
-        & t > 0 \\
-        0, & t \leq 0 \end{array} \right.
-
-    with :math:`\tau = \sigma`.
-
-    Examples
-    --------
-
-    .. plot::
-       :include-source:
-
-       from elephant import kernels
-       import quantities as pq
-       import numpy as np
-       import matplotlib.pyplot as plt
-
-       time_array = np.linspace(-1, 4, num=100) * pq.s
-       kernel = kernels.ExponentialKernel(sigma=1*pq.s)
-       kernel_time = kernel(time_array)
-       plt.plot(time_array, kernel_time)
-       plt.title("ExponentialKernel with sigma=1s")
-       plt.xlabel("time, s")
-       plt.ylabel("kernel, 1/s")
-       plt.show()
-
-
-    """
-
-    @property
-    def min_cutoff(self):
-        min_cutoff = 3.0
-        return min_cutoff
-
-    def _evaluate(self, times):
-        tau = self.sigma.rescale(times.units).magnitude
-        if self.invert:
-            times = -times
-        kernel = scipy.stats.expon.pdf(times.magnitude, loc=0, scale=tau)
-        kernel = pq.Quantity(kernel, units=1 / times.units)
-        return kernel
-
-    @deprecated_alias(t='time')
-    def cdf(self, time):
-        self._check_time_input(time)
-        tau = self.sigma.rescale(time.units).magnitude
-        time = time.magnitude
-        if self.invert:
-            time = np.minimum(time, 0)
-            return np.exp(time / tau)
-        time = np.maximum(time, 0)
-        return 1. - np.exp(-time / tau)
-
-    def icdf(self, fraction):
-        self._check_fraction(fraction)
-        if self.invert:
-            return self.sigma * np.log(fraction)
-        return -self.sigma * np.log(1.0 - fraction)
-
-    def boundary_enclosing_area_fraction(self, fraction):
-        # the boundary b, which encloses a 'fraction' of CDF in [-b, b] range,
-        # does not depend on the invert, if the kernel is cut at zero.
-        # It's easier to compute 'b' for a kernel that has not been inverted.
-        kernel = self.__class__(sigma=self.sigma, invert=False)
-        return kernel.icdf(fraction)
-
-
-class AlphaKernel(Kernel):
-    r"""
-    Class for alpha kernels.
-
-    .. math::
-        K(t) = \left\{\begin{array}{ll} (1 / \tau^2)
-        \ t\ \exp{(-t / \tau)}, & t > 0 \\
-        0, & t \leq 0 \end{array} \right.
-
-    with :math:`\tau = \sigma / \sqrt{2}`.
-
-    Examples
-    --------
-
-    .. plot::
-       :include-source:
-
-       from elephant import kernels
-       import quantities as pq
-       import numpy as np
-       import matplotlib.pyplot as plt
-
-       time_array = np.linspace(-1, 4, num=100) * pq.s
-       kernel = kernels.AlphaKernel(sigma=1*pq.s)
-       kernel_time = kernel(time_array)
-       plt.plot(time_array, kernel_time)
-       plt.title("AlphaKernel with sigma=1s")
-       plt.xlabel("time, s")
-       plt.ylabel("kernel, 1/s")
-       plt.show()
-
-    """
-
-    @property
-    def min_cutoff(self):
-        min_cutoff = 3.0
-        return min_cutoff
-
-    def _evaluate(self, times):
-        t_units = times.units
-        tau = self.sigma.rescale(t_units).magnitude / math.sqrt(2)
-        times = times.magnitude
-        if self.invert:
-            times = -times
-        kernel = (times >= 0) * 1 / tau ** 2 * times * np.exp(-times / tau)
-        kernel = pq.Quantity(kernel, units=1 / t_units)
-        return kernel
-
-    @deprecated_alias(t='time')
-    def cdf(self, time):
-        self._check_time_input(time)
-        tau = self.sigma.rescale(time.units).magnitude / math.sqrt(2)
-        cdf = self._cdf_stripped(time.magnitude, tau)
-        return cdf
-
-    def _cdf_stripped(self, t, tau):
-        # CDF without time units
-        if self.invert:
-            t = np.minimum(t, 0)
-            return np.exp(t / tau) * (tau - t) / tau
-        t = np.maximum(t, 0)
-        return 1 - np.exp(-t / tau) * (t + tau) / tau
-
-    def icdf(self, fraction):
-        self._check_fraction(fraction)
-        tau = self.sigma.magnitude / math.sqrt(2)
-
-        def cdf(x):
-            # CDF of the AlphaKernel minus 'fraction';
-            # evaluates the residual of the root equation cdf(x) = fraction
-            return self._cdf_stripped(x, tau) - fraction
-
-        # fraction is a good starting point for CDF approximation
-        x0 = fraction if not self.invert else fraction - 1
-        x_quantile = scipy.optimize.fsolve(cdf, x0=x0, xtol=1e-7)[0]
-        x_quantile = pq.Quantity(x_quantile, units=self.sigma.units)
-        return x_quantile
-
-    def boundary_enclosing_area_fraction(self, fraction):
-        # the boundary b, which encloses a 'fraction' of CDF in [-b, b] range,
-        # does not depend on the invert, if the kernel is cut at zero.
-        # It's easier to compute 'b' for a kernel that has not been inverted.
-        kernel = self.__class__(sigma=self.sigma, invert=False)
-        return kernel.icdf(fraction)
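Because the alpha kernel's CDF has no closed-form inverse, icdf falls back to a numeric root solver; a round-trip sketch, assuming AlphaKernel is importable:

    import quantities as pq

    kernel = AlphaKernel(sigma=1 * pq.s)
    t = kernel.icdf(0.8)
    print(kernel.cdf(t))  # recovers ~0.8 up to the solver tolerance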
+/annex/objects/MD5-s27594--acc566c03d2508653284e2b0c1d51c47

+ 1 - 228
code/elephant/elephant/neo_tools.py

@@ -1,228 +1 @@
-# -*- coding: utf-8 -*-
-"""
-Tools to manipulate Neo objects.
-
-:copyright: Copyright 2014-2016 by the Elephant team, see `doc/authors.rst`.
-:license: Modified BSD, see LICENSE.txt for details.
-"""
-
-from __future__ import division, print_function, unicode_literals
-import warnings
-
-from itertools import chain
-
-from neo.core.container import unique_objs
-from elephant.utils import deprecated_alias
-
-__all__ = [
-    "extract_neo_attributes",
-    "get_all_spiketrains",
-    "get_all_events",
-    "get_all_epochs"
-]
-
-
-@deprecated_alias(obj='neo_object')
-def extract_neo_attributes(neo_object, parents=True, child_first=True,
-                           skip_array=False, skip_none=False):
-    """
-    Given a Neo object, return a dictionary of attributes and annotations.
-
-    Parameters
-    ----------
-    neo_object : neo.BaseNeo
-        Object to get attributes and annotations.
-    parents : bool, optional
-        If True, also include attributes and annotations from parent Neo
-        objects (if any).
-        Default: True.
-    child_first : bool, optional
-        If True, values of child attributes are used over parent attributes in
-        the event of a name conflict.
-        If False, parent attributes are used.
-        This parameter does nothing if `parents` is False.
-        Default: True.
-    skip_array : bool, optional
-        If True, skip attributes that store non-scalar array values.
-        Default: False.
-    skip_none : bool, optional
-        If True, skip annotations and attributes that have a value of None.
-        Default: False.
-
-    Returns
-    -------
-    dict
-        A dictionary where the keys are annotations or attribute names and
-        the values are the corresponding annotation or attribute value.
-
-    """
-    attrs = neo_object.annotations.copy()
-    if not skip_array and hasattr(neo_object, "array_annotations"):
-        # Exclude labels and durations, and any other fields that should not
-        # be a part of array_annotation.
-        required_keys = set(neo_object.array_annotations).difference(
-            dir(neo_object))
-        for a in required_keys:
-            if "array_annotations" not in attrs:
-                attrs["array_annotations"] = {}
-            attrs["array_annotations"][a] = \
-                neo_object.array_annotations[a].copy()
-    for attr in neo_object._necessary_attrs + neo_object._recommended_attrs:
-        if skip_array and len(attr) >= 3 and attr[2]:
-            continue
-        attr = attr[0]
-        if attr == getattr(neo_object, '_quantity_attr', None):
-            continue
-        attrs[attr] = getattr(neo_object, attr, None)
-
-    if skip_none:
-        for attr, value in attrs.copy().items():
-            if value is None:
-                del attrs[attr]
-
-    if not parents:
-        return attrs
-
-    for parent in getattr(neo_object, 'parents', []):
-        if parent is None:
-            continue
-        newattr = extract_neo_attributes(parent, parents=True,
-                                         child_first=child_first,
-                                         skip_array=skip_array,
-                                         skip_none=skip_none)
-        if child_first:
-            newattr.update(attrs)
-            attrs = newattr
-        else:
-            attrs.update(newattr)
-
-    return attrs
-
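A usage sketch for the function above on an annotated spike train (the 'trial' annotation is hypothetical):

    import quantities as pq
    from neo import SpikeTrain

    st = SpikeTrain([0.2, 0.5] * pq.s, t_stop=1 * pq.s, name='unit-1',
                    trial=7)  # extra kwargs become annotations
    attrs = extract_neo_attributes(st, parents=False, skip_none=True)
    print(attrs['name'], attrs['trial'])  # unit-1 7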
-
-def extract_neo_attrs(*args, **kwargs):
-    warnings.warn("'extract_neo_attrs' function is deprecated; "
-                  "use 'extract_neo_attributes'", DeprecationWarning)
-    return extract_neo_attributes(*args, **kwargs)
-
-
-def _get_all_objs(container, class_name):
-    """
-    Get all Neo objects of a given type from a container.
-
-    The objects can be any list, dict, or other iterable or mapping containing
-    Neo objects of a particular class, as well as any Neo object that can hold
-    the object.
-    Objects are searched recursively, so the objects can be nested (such as a
-    list of blocks).
-
-    Parameters
-    ----------
-    container : list, tuple, iterable, dict, neo.Container
-                The container for the Neo objects.
-    class_name : str
-                The name of the class, with proper capitalization
-                (i.e., 'SpikeTrain', not 'Spiketrain' or 'spiketrain').
-
-    Returns
-    -------
-    list
-        A list of unique Neo objects.
-
-    Raises
-    ------
-    ValueError
-        If the type of `container` cannot be handled.
-
-    """
-    if container.__class__.__name__ == class_name:
-        return [container]
-    classholder = class_name.lower() + 's'
-    if hasattr(container, classholder):
-        vals = getattr(container, classholder)
-    elif hasattr(container, 'list_children_by_class'):
-        vals = container.list_children_by_class(class_name)
-    elif hasattr(container, 'values') and not hasattr(container, 'ndim'):
-        vals = container.values()
-    elif hasattr(container, '__iter__') and not hasattr(container, 'ndim'):
-        vals = container
-    else:
-        raise ValueError('Cannot handle object of type %s' % type(container))
-    res = list(chain.from_iterable(_get_all_objs(obj, class_name)
-                                   for obj in vals))
-    return unique_objs(res)
-
-
-def get_all_spiketrains(container):
-    """
-    Get all `neo.SpikeTrain` objects from a container.
-
-    The objects can be any list, dict, or other iterable or mapping containing
-    spiketrains, as well as any Neo object that can hold spiketrains:
-    `neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
-
-    Containers are searched recursively, so the objects can be nested
-    (such as a list of blocks).
-
-    Parameters
-    ----------
-    container : list, tuple, iterable, dict, neo.Block, neo.Segment, neo.Unit,
-        neo.ChannelIndex
-        The container for the spiketrains.
-
-    Returns
-    -------
-    list
-        A list of the unique `neo.SpikeTrain` objects in `container`.
-
-    """
-    return _get_all_objs(container, 'SpikeTrain')
-
-
-def get_all_events(container):
-    """
-    Get all `neo.Event` objects from a container.
-
-    The objects can be any list, dict, or other iterable or mapping containing
-    events, as well as any neo object that can hold events:
-    `neo.Block` and `neo.Segment`.
-
-    Containers are searched recursively, so the objects can be nested
-    (such as a list of blocks).
-
-    Parameters
-    ----------
-    container : list, tuple, iterable, dict, neo.Block, neo.Segment
-                The container for the events.
-
-    Returns
-    -------
-    list
-        A list of the unique `neo.Event` objects in `container`.
-
-    """
-    return _get_all_objs(container, 'Event')
-
-
-def get_all_epochs(container):
-    """
-    Get all `neo.Epoch` objects from a container.
-
-    The objects can be any list, dict, or other iterable or mapping containing
-    epochs, as well as any neo object that can hold epochs:
-    `neo.Block` and `neo.Segment`.
-
-    Containers are searched recursively, so the objects can be nested
-    (such as a list of blocks).
-
-    Parameters
-    ----------
-    container : list, tuple, iterable, dict, neo.Block, neo.Segment
-                The container for the epochs.
-
-    Returns
-    -------
-    list
-        A list of the unique `neo.Epoch` objects in `container`.
-
-    """
-    return _get_all_objs(container, 'Epoch')
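A sketch of recursive extraction from a nested container, assuming the functions above are imported from elephant.neo_tools:

    import quantities as pq
    from neo import Block, Segment, SpikeTrain

    block = Block()
    segment = Segment()
    segment.spiketrains.append(SpikeTrain([0.1] * pq.s, t_stop=1 * pq.s))
    block.segments.append(segment)

    # duplicates across nested containers are removed via unique_objs
    print(len(get_all_spiketrains([block, block])))  # 1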
+/annex/objects/MD5-s7271--5aa8b45ef7c83cf1c429055009b32bb2

+ 1 - 618
code/elephant/elephant/pandas_bridge.py

@@ -1,618 +1 @@
-# -*- coding: utf-8 -*-
-"""
-Bridge to the pandas library.
-
-:copyright: Copyright 2014-2016 by the Elephant team, see `doc/authors.rst`.
-:license: Modified BSD, see LICENSE.txt for details.
-"""
-
-from __future__ import division, print_function, unicode_literals
-
-import numpy as np
-import pandas as pd
-import warnings
-import quantities as pq
-
-from elephant.neo_tools import (extract_neo_attributes, get_all_epochs,
-                                get_all_events, get_all_spiketrains)
-
-
-warnings.simplefilter('once', DeprecationWarning)
-warnings.warn("pandas_bridge module will be removed in Elephant v0.8.x",
-              DeprecationWarning)
-
-
-def _multiindex_from_dict(inds):
-    """Given a dictionary, return a `pandas.MultiIndex`.
-
-    Parameters
-    ----------
-    inds : dict
-           A dictionary where the keys are annotations or attribute names and
-           the values are the corresponding annotation or attribute value.
-
-    Returns
-    -------
-    pandas MultiIndex
-    """
-    names, indexes = zip(*sorted(inds.items()))
-    return pd.MultiIndex.from_tuples([indexes], names=names)
-
-
-def _sort_inds(obj, axis=0):
-    """Put the indexes and index levels of a pandas object in sorted order.
-
-    Parameters
-    ----------
-    obj : pandas Series, DataFrame, Panel, or Panel4D
-          The object whose indexes should be sorted.
-    axis : int, list of ints, or 'all', optional
-           The axis whose indexes should be sorted.  Default is 0.
-           Can also be a list of axes, in which case all of those axes
-           are sorted.  If 'all', sort the indexes of all axes.
-
-    Returns
-    -------
-    pandas Series, DataFrame, Panel, or Panel4D
-        A copy of the object with indexes sorted.
-    """
-    if axis == 'all':
-        return _sort_inds(obj, axis=range(obj.ndim))
-
-    if hasattr(axis, '__iter__'):
-        for iax in axis:
-            obj = _sort_inds(obj, iax)
-        return obj
-
-    obj = obj.reorder_levels(sorted(obj.axes[axis].names), axis=axis)
-    return obj.sort_index(level=0, axis=axis, sort_remaining=True)
-
-
-def _extract_neo_attrs_safe(obj, parents=True, child_first=True):
-    """Given a neo object, return a dictionary of attributes and annotations.
-
-    This is done in a manner that is safe for `pandas` indexes.
-
-    Parameters
-    ----------
-
-    obj : neo object
-    parents : bool, optional
-              Also include attributes and annotations from parent neo
-              objects (if any).
-    child_first : bool, optional
-                  If True (default True), values of child attributes are used
-                  over parent attributes in the event of a name conflict.
-                  If False, parent attributes are used.
-                  This parameter does nothing if `parents` is False.
-
-    Returns
-    -------
-
-    dict
-        A dictionary where the keys are annotations or attribute names and
-        the values are the corresponding annotation or attribute value.
-
-    """
-    res = extract_neo_attributes(obj, skip_array=True, skip_none=True,
-                                 parents=parents, child_first=child_first)
-    for key, value in res.items():
-        res[key] = _convert_value_safe(value)
-        key2 = _convert_value_safe(key)
-        if key2 is not key:
-            res[key2] = res.pop(key)
-
-    return res
-
-
-def _convert_value_safe(value):
-    """Convert `neo` values to a value compatible with `pandas`.
-
-    Some types and dtypes used with neo are not safe to use with pandas in some
-    or all situations.
-
-    `quantities.Quantity` values don't follow the normal Python rule that
-    values which are equal should have the same hash, making them
-    fundamentally incompatible with `pandas`.
-
-    On python 3, `pandas` coerces `S` dtypes to bytes, which are not always
-    safe to use.
-
-    Parameters
-    ----------
-
-    value : any
-            Value to convert (if it has any known issues).
-
-    Returns
-    -------
-
-    any
-        `value` or a version of value with potential problems fixed.
-
-    """
-    if hasattr(value, 'dimensionality'):
-        return (value.magnitude.tolist(), str(value.dimensionality))
-    if hasattr(value, 'dtype') and value.dtype.kind == 'S':
-        return value.astype('U').tolist()
-    if hasattr(value, 'tolist'):
-        return value.tolist()
-    if hasattr(value, 'decode') and not hasattr(value, 'encode'):
-        return value.decode('UTF8')
-    return value
-
-
-def spiketrain_to_dataframe(spiketrain, parents=True, child_first=True):
-    """Convert a `neo.SpikeTrain` to a `pandas.DataFrame`.
-
-    The `pandas.DataFrame` object has a single column, with each element
-    being the spike time converted to a `float` value in seconds.
-
-    The column heading is a `pandas.MultiIndex` with one index
-    for each of the scalar attributes and annotations.  The `index`
-    is the spike number.
-
-    Parameters
-    ----------
-
-    spiketrain : neo SpikeTrain
-                 The SpikeTrain to convert.
-    parents : bool, optional
-              Also include attributes and annotations from parent neo
-              objects (if any).
-
-    Returns
-    -------
-
-    pandas DataFrame
-        A DataFrame containing the spike times from `spiketrain`.
-
-    Notes
-    -----
-
-    The index name is `spike_number`.
-
-    Attributes that contain non-scalar values are skipped.  So are
-    annotations or attributes containing a value of `None`.
-
-    `quantity.Quantities` types are incompatible with `pandas`, so attributes
-    and annotations of that type are converted to a tuple where the first
-    element is the scalar value and the second is the string representation of
-    the units.
-
-    """
-    attrs = _extract_neo_attrs_safe(spiketrain,
-                                    parents=parents, child_first=child_first)
-    columns = _multiindex_from_dict(attrs)
-
-    times = spiketrain.magnitude
-    times = pq.Quantity(times, spiketrain.units).rescale('s').magnitude
-    times = times[np.newaxis].T
-
-    index = pd.Index(np.arange(len(spiketrain)), name='spike_number')
-
-    pdobj = pd.DataFrame(times, index=index, columns=columns)
-    return _sort_inds(pdobj, axis=1)
-
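A usage sketch for the conversion above; pandas_bridge is deprecated, so this only illustrates the historical behaviour:

    import quantities as pq
    from neo import SpikeTrain

    st = SpikeTrain([100, 300] * pq.ms, t_stop=1 * pq.s, name='unit-1')
    df = spiketrain_to_dataframe(st, parents=False)
    print(df.index.name, df.shape)  # spike_number (2, 1); times stored in seconds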
-
-def event_to_dataframe(event, parents=True, child_first=True):
-    """Convert a `neo.core.Event` to a `pandas.DataFrame`.
-
-    The `pandas.DataFrame` object has a single column, with each element
-    being the event label from the `event.label` attribute.
-
-    The column heading is a `pandas.MultiIndex` with one index
-    for each of the scalar attributes and annotations.  The `index`
-    is the time stamp from the `event.times` attribute.
-
-    Parameters
-    ----------
-
-    event : neo Event
-            The Event to convert.
-    parents : bool, optional
-              Also include attributes and annotations from parent neo
-              objects (if any).
-    child_first : bool, optional
-                  If True (default True), values of child attributes are used
-                  over parent attributes in the event of a name conflict.
-                  If False, parent attributes are used.
-                  This parameter does nothing if `parents` is False.
-
-    Returns
-    -------
-
-    pandas DataFrame
-        A DataFrame containing the labels from `event`.
-
-    Notes
-    -----
-
-    If the lengths of `event.times` and `event.labels` are not the same,
-    the longer is truncated to the length of the shorter.
-
-    The index name is `times`.
-
-    Attributes that contain non-scalar values are skipped.  So are
-    annotations or attributes containing a value of `None`.
-
-    `quantity.Quantities` types are incompatible with `pandas`, so attributes
-    and annotations of that type are converted to a tuple where the first
-    element is the scalar value and the second is the string representation of
-    the units.
-
-    """
-    attrs = _extract_neo_attrs_safe(event,
-                                    parents=parents, child_first=child_first)
-    columns = _multiindex_from_dict(attrs)
-
-    times = event.times.rescale('s').magnitude
-    labels = event.labels.astype('U')
-
-    times = times[:len(labels)]
-    labels = labels[:len(times)]
-
-    index = pd.Index(times, name='times')
-
-    pdobj = pd.DataFrame(labels[np.newaxis].T, index=index, columns=columns)
-    return _sort_inds(pdobj, axis=1)
-
-
-def epoch_to_dataframe(epoch, parents=True, child_first=True):
-    """Convert a `neo.core.Epoch` to a `pandas.DataFrame`.
-
-    The `pandas.DataFrame` object has a single column, with each element
-    being the epoch label from the `epoch.label` attribute.
-
-    The column heading is a `pandas.MultiIndex` with one index
-    for each of the scalar attributes and annotations.  The `index`
-    is a `pandas.MultiIndex`, with the first index being the time stamp from
-    the `epoch.times` attribute and the second being the duration from
-    the `epoch.durations` attribute.
-
-    Parameters
-    ----------
-
-    epoch : neo Epoch
-            The Epoch to convert.
-    parents : bool, optional
-              Also include attributes and annotations from parent neo
-              objects (if any).
-    child_first : bool, optional
-                  If True (default True), values of child attributes are used
-                  over parent attributes in the event of a name conflict.
-                  If False, parent attributes are used.
-                  This parameter does nothing if `parents` is False.
-
-    Returns
-    -------
-
-    pandas DataFrame
-        A DataFrame containing the labels from `epoch`.
-
-    Notes
-    -----
-
-    If the lengths of `epoch.times`, `epoch.durations`, and `epoch.labels` are
-    not the same, the longer ones are truncated to the length of the shortest.
-
-    The index names for `epoch.times` and `epoch.durations` are `times` and
-    `durations`, respectively.
-
-    Attributes that contain non-scalar values are skipped.  So are
-    annotations or attributes containing a value of `None`.
-
-    `quantity.Quantities` types are incompatible with `pandas`, so attributes
-    and annotations of that type are converted to a tuple where the first
-    element is the scalar value and the second is the string representation of
-    the units.
-
-    """
-    attrs = _extract_neo_attrs_safe(epoch,
-                                    parents=parents, child_first=child_first)
-    columns = _multiindex_from_dict(attrs)
-
-    times = epoch.times.rescale('s').magnitude
-    durs = epoch.durations.rescale('s').magnitude
-    labels = epoch.labels.astype('U')
-
-    minlen = min([len(durs), len(times), len(labels)])
-    index = pd.MultiIndex.from_arrays([times[:minlen], durs[:minlen]],
-                                      names=['times', 'durations'])
-
-    pdobj = pd.DataFrame(labels[:minlen][np.newaxis].T,
-                         index=index, columns=columns)
-    return _sort_inds(pdobj, axis='all')
-
-
-def _multi_objs_to_dataframe(container, conv_func, get_func,
-                             parents=True, child_first=True):
-    """Convert one or more of a given `neo` object to a `pandas.DataFrame`.
-
-    The objects can be any list, dict, or other iterable or mapping containing
-    the object, as well as any neo object that can hold the object.
-    Objects are searched recursively, so the objects can be nested (such as a
-    list of blocks).
-
-    The column heading is a `pandas.MultiIndex` with one index
-    for each of the scalar attributes and annotations of the respective
-    object.
-
-    Parameters
-    ----------
-
-    container : list, tuple, iterable, dict, neo container object
-                The container for the objects to convert.
-    parents : bool, optional
-              Also include attributes and annotations from parent neo
-              objects (if any).
-    child_first : bool, optional
-                  If True (default True), values of child attributes are used
-                  over parent attributes in the event of a name conflict.
-                  If False, parent attributes are used.
-                  This parameter does nothing if `parents` is False.
-
-    Returns
-    -------
-
-    pandas DataFrame
-        A DataFrame containing the converted objects.
-
-    Attributes that contain non-scalar values are skipped.  So are
-    annotations or attributes containing a value of `None`.
-
-    `quantity.Quantities` types are incompatible with `pandas`, so attributes
-    and annotations of that type are converted to a tuple where the first
-    element is the scalar value and the second is the string representation of
-    the units.
-
-    """
-    res = pd.concat([conv_func(obj, parents=parents, child_first=child_first)
-                     for obj in get_func(container)], axis=1)
-    return _sort_inds(res, axis=1)
-
-
-def multi_spiketrains_to_dataframe(container,
-                                   parents=True, child_first=True):
-    """Convert one or more `neo.SpikeTrain` objects to a `pandas.DataFrame`.
-
-    The objects can be any list, dict, or other iterable or mapping containing
-    spiketrains, as well as any neo object that can hold spiketrains:
-    `neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
-    Objects are searched recursively, so the objects can be nested (such as a
-    list of blocks).
-
-    The `pandas.DataFrame` object has one column for each spiketrain, with each
-    element being the spike time converted to a `float` value in seconds.
-    Columns are padded to the same length with `NaN` values.
-
-    The column heading is a `pandas.MultiIndex` with one index
-    for each of the scalar attributes and annotations of the respective
-    spiketrain.  The `index` is the spike number.
-
-    Parameters
-    ----------
-
-    container : list, tuple, iterable, dict,
-                neo Block, neo Segment, neo Unit, neo ChannelIndex
-                The container for the spiketrains to convert.
-    parents : bool, optional
-              Also include attributes and annotations from parent neo
-              objects (if any).
-    child_first : bool, optional
-                  If True (default True), values of child attributes are used
-                  over parent attributes in the event of a name conflict.
-                  If False, parent attributes are used.
-                  This parameter does nothing if `parents` is False.
-
-    Returns
-    -------
-
-    pandas DataFrame
-        A DataFrame containing the spike times from `container`.
-
-    Notes
-    -----
-
-    The index name is `spike_number`.
-
-    Attributes that contain non-scalar values are skipped.  So are
-    annotations or attributes containing a value of `None`.
-
-    `quantity.Quantities` types are incompatible with `pandas`, so attributes
-    and annotations of that type are converted to a tuple where the first
-    element is the scalar value and the second is the string representation of
-    the units.
-
-    """
-    return _multi_objs_to_dataframe(container,
-                                    spiketrain_to_dataframe,
-                                    get_all_spiketrains,
-                                    parents=parents, child_first=child_first)
-
-
-def multi_events_to_dataframe(container, parents=True, child_first=True):
-    """Convert one or more `neo.Event` objects to a `pandas.DataFrame`.
-
-    The objects can be any list, dict, or other iterable or mapping containing
-    events, as well as any neo object that can hold events:
-    `neo.Block` and `neo.Segment`.  Objects are searched recursively, so the
-    objects can be nested (such as a list of blocks).
-
-    The `pandas.DataFrame` object has one column for each event, with each
-    element being the event label. Columns are padded to the same length with
-    `NaN` values.
-
-    The column heading is a `pandas.MultiIndex` with one index
-    for each of the scalar attributes and annotations of the respective
-    event.  The `index` is the time stamp from the `event.times` attribute.
-
-    Parameters
-    ----------
-
-    container : list, tuple, iterable, dict, neo Block, neo Segment
-                The container for the events to convert.
-    parents : bool, optional
-              Also include attributes and annotations from parent neo
-              objects (if any).
-    child_first : bool, optional
-                  If True (default True), values of child attributes are used
-                  over parent attributes in the event of a name conflict.
-                  If False, parent attributes are used.
-                  This parameter does nothing if `parents` is False.
-
-    Returns
-    -------
-
-    pandas DataFrame
-        A DataFrame containing the labels from `container`.
-
-    Notes
-    -----
-
-    If the lengths of `event.times` and `event.labels` are not the same for
-    any individual event, the longer will be truncated to the length of the
-    shorter for that event.  Between events, lengths can differ.
-
-    The index name is `times`.
-
-    Attributes that contain non-scalar values are skipped.  So are
-    annotations or attributes containing a value of `None`.
-
-    `quantities.Quantity` types are incompatible with `pandas`, so attributes
-    and annotations of that type are converted to a tuple where the first
-    element is the scalar value and the second is the string representation of
-    the units.
-
-    """
-    return _multi_objs_to_dataframe(container,
-                                    event_to_dataframe, get_all_events,
-                                    parents=parents, child_first=child_first)
-
-
-def multi_epochs_to_dataframe(container, parents=True, child_first=True):
-    """Convert one or more `neo.Epoch` objects to a `pandas.DataFrame`.
-
-    The objects can be any list, dict, or other iterable or mapping containing
-    epochs, as well as any neo object that can hold epochs:
-    `neo.Block` and `neo.Segment`.  Objects are searched recursively, so the
-    objects can be nested (such as a list of blocks).
-
-    The `pandas.DataFrame` object has one column for each epoch, with each
-    element being the epoch label. Columns are padded to the same length with
-    `NaN` values.
-
-    The column heading is a `pandas.MultiIndex` with one index
-    for each of the scalar attributes and annotations of the respective
-    epoch.  The `index` is a `pandas.MultiIndex`, with the first index being
-    the time stamp from the `epoch.times` attribute and the second being the
-    duration from the `epoch.durations` attribute.
-
-    Parameters
-    ----------
-
-    container : list, tuple, iterable, dict, neo Block, neo Segment
-                The container for the epochs to convert.
-    parents : bool, optional
-              Also include attributes and annotations from parent neo
-              objects (if any).
-    child_first : bool, optional
-                  If True (default True), values of child attributes are used
-                  over parent attributes in the event of a name conflict.
-                  If False, parent attributes are used.
-                  This parameter does nothing if `parents` is False.
-
-    Returns
-    -------
-
-    pandas DataFrame
-        A DataFrame containing the labels from `container`.
-
-    Notes
-    -----
-
-    If the lengths of `epoch.times`, `epoch.durations`, and `epoch.labels`
-    are not the same for any individual epoch, the longer ones will be
-    truncated to the length of the shortest for that epoch.  Between epochs,
-    lengths can differ.
-
-    The index level names for `epoch.times` and `epoch.durations` are
-    `times` and `durations`, respectively.
-
-    Attributes that contain non-scalar values are skipped.  So are
-    annotations or attributes containing a value of `None`.
-
-    `quantities.Quantity` types are incompatible with `pandas`, so attributes
-    and annotations of that type are converted to a tuple where the first
-    element is the scalar value and the second is the string representation of
-    the units.
-
-    """
-    return _multi_objs_to_dataframe(container,
-                                    epoch_to_dataframe, get_all_epochs,
-                                    parents=parents, child_first=child_first)
-
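The index layout described above is easiest to see on a toy epoch. A minimal
sketch, again assuming the removed `pandas_bridge` API is available:

    import numpy as np
    import neo
    import quantities as pq
    from elephant.pandas_bridge import multi_epochs_to_dataframe

    epoch = neo.Epoch(times=[0, 2] * pq.s, durations=[1, 1] * pq.s,
                      labels=np.array(['trial1', 'trial2']))
    df = multi_epochs_to_dataframe([epoch], parents=False)

    # The row MultiIndex carries the epoch times and durations
    print(df.index.names)  # expected ['times', 'durations']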
-
-def slice_spiketrain(pdobj, t_start=None, t_stop=None):
-    """Slice a `pandas.DataFrame`, changing indices appropriately.
-
-    Values outside the sliced range are converted to `NaN` values.
-
-    Slicing happens over columns.
-
-    This sets the `t_start` and `t_stop` column indexes to be the new values.
-    Otherwise it is the same as setting values outside the range to `NaN`.
-
-    Parameters
-    ----------
-    pdobj : pandas DataFrame
-            The DataFrame to slice.
-    t_start : float, optional
-              If specified, values in the returned DataFrame less than this
-              are set to `NaN`.
-              Default is `None` (no lower bound).
-    t_stop : float, optional
-             If specified, values in the returned DataFrame greater than this
-             are set to `NaN`.
-             Default is `None` (no upper bound).
-
-    Returns
-    -------
-
-    pdobj : pandas DataFrame
-            The returned object is the same type as the input `pdobj`.
-
-    Notes
-    -----
-
-    The order of the index and/or column levels of the returned object may
-    differ from the order of the original.
-
-    If `t_start` or `t_stop` is specified, all column indexes will be changed
-    to the respective values, including those already within the new range.
-    If `t_start` or `t_stop` is not specified, those column indexes will not
-    be changed.
-
-    Returns a copy, even if `t_start` and `t_stop` are both `None`.
-
-    """
-    if t_start is None and t_stop is None:
-        return pdobj.copy()
-
-    if t_stop is not None:
-        pdobj[pdobj > t_stop] = np.nan
-
-        pdobj = pdobj.T.reset_index(level='t_stop')
-        pdobj['t_stop'] = t_stop
-        pdobj = pdobj.set_index('t_stop', append=True).T
-        pdobj = _sort_inds(pdobj, axis=1)
-
-    if t_start is not None:
-        pdobj[pdobj < t_start] = np.nan
-
-        pdobj = pdobj.T.reset_index(level='t_start')
-        pdobj['t_start'] = t_start
-        pdobj = pdobj.set_index('t_start', append=True).T
-        pdobj = _sort_inds(pdobj, axis=1)
-
-    return pdobj
+/annex/objects/MD5-s21991--8a8b1fef6d06c767a08e36656d36afb7
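Taken together with the converters above, the slicing helper was meant to be
chained onto their output. A minimal sketch, where `block` is a hypothetical
`neo.Block` and the pre-0.9.0 `elephant.pandas_bridge` module is assumed:

    from elephant.pandas_bridge import (multi_spiketrains_to_dataframe,
                                        slice_spiketrain)

    # Columns carry 't_start'/'t_stop' MultiIndex levels from the conversion
    df = multi_spiketrains_to_dataframe(block)

    # Spike times outside [0.5, 2.0] become NaN, and the 't_start'/'t_stop'
    # column index levels are rewritten to 0.5 and 2.0 for every column
    df_sliced = slice_spiketrain(df, t_start=0.5, t_stop=2.0)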

+ 1 - 0
code/elephant/elephant/parallel/__init__.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s1342--b1a524cfb6d8aa1e0fa745d3ed0f35f9

+ 1 - 0
code/elephant/elephant/parallel/mpi.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s2050--ab4110bfb1506fd6d7d3f27f6785172c

+ 1 - 0
code/elephant/elephant/parallel/parallel.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s3668--675558ccafd1e446a4bce17eb3b563c1

+ 1 - 192
code/elephant/elephant/phase_analysis.py

@@ -1,192 +1 @@
-# -*- coding: utf-8 -*-
-"""
-Methods for performing phase analysis.
-
-:copyright: Copyright 2014-2018 by the Elephant team, see `doc/authors.rst`.
-:license: Modified BSD, see LICENSE.txt for details.
-"""
-
-from __future__ import division, print_function, unicode_literals
-
-import numpy as np
-import quantities as pq
-
-__all__ = [
-    "spike_triggered_phase"
-]
-
-
-def spike_triggered_phase(hilbert_transform, spiketrains, interpolate):
-    """
-    Calculate the set of spike-triggered phases of a `neo.AnalogSignal`.
-
-    Parameters
-    ----------
-    hilbert_transform : neo.AnalogSignal or list of neo.AnalogSignal
-        `neo.AnalogSignal` of the complex analytic signal (e.g., returned by
-        the `elephant.signal_processing.hilbert` function).
-        If `hilbert_transform` is only one signal, all spike trains are
-        compared to this signal. Otherwise, length of `hilbert_transform` must
-        match the length of `spiketrains`.
-    spiketrains : neo.SpikeTrain or list of neo.SpikeTrain
-        Spike trains on which to trigger `hilbert_transform` extraction.
-    interpolate : bool
-        If True, the phases and amplitudes of `hilbert_transform` for spikes
-        falling between two samples of the signal are interpolated.
-        If False, the closest sample of `hilbert_transform` is used.
-
-    Returns
-    -------
-    phases : list of np.ndarray
-        Spike-triggered phases. Entries in the list correspond to the
-        `neo.SpikeTrain`s in `spiketrains`. Each entry contains an array with
-        the spike-triggered angles (in rad) of the signal.
-    amp : list of pq.Quantity
-        Corresponding spike-triggered amplitudes.
-    times : list of pq.Quantity
-        A list of times at which the phases and amplitudes were extracted.
-        They correspond to the times of the `neo.SpikeTrain` referred to by
-        the list item.
-
-    Raises
-    ------
-    ValueError
-        If the number of spike trains and the number of phase signals do not
-        match, and neither of the two is a single signal.
-
-    Examples
-    --------
-    Create a 20 Hz oscillatory signal sampled at 1 kHz and a random Poisson
-    spike train, then calculate spike-triggered phases and amplitudes of the
-    oscillation:
-
-    >>> import neo
-    >>> import elephant
-    >>> import quantities as pq
-    >>> import numpy as np
-    ...
-    >>> f_osc = 20. * pq.Hz
-    >>> f_sampling = 1 * pq.ms
-    >>> tlen = 100 * pq.s
-    ...
-    >>> time_axis = np.arange(
-    ...     0, tlen.magnitude,
-    ...     f_sampling.rescale(pq.s).magnitude) * pq.s
-    >>> analogsignal = neo.AnalogSignal(
-    ...     np.sin(2 * np.pi * (f_osc * time_axis).simplified.magnitude),
-    ...     units=pq.mV, t_start=0*pq.ms, sampling_period=f_sampling)
-    >>> spiketrain = (elephant.spike_train_generation.
-    ...     homogeneous_poisson_process(
-    ...     50 * pq.Hz, t_start=0.0*pq.ms, t_stop=tlen.rescale(pq.ms)))
-    ...
-    >>> phases, amps, times = elephant.phase_analysis.spike_triggered_phase(
-    ...     elephant.signal_processing.hilbert(analogsignal),
-    ...     spiketrain,
-    ...     interpolate=True)
-
-    """
-
-    # Convert inputs to lists
-    if not isinstance(spiketrains, list):
-        spiketrains = [spiketrains]
-
-    if not isinstance(hilbert_transform, list):
-        hilbert_transform = [hilbert_transform]
-
-    # Number of signals
-    num_spiketrains = len(spiketrains)
-    num_phase = len(hilbert_transform)
-
-    if num_spiketrains != 1 and num_phase != 1 and \
-            num_spiketrains != num_phase:
-        raise ValueError(
-            "Number of spike trains and number of phase signals"
-            "must match, or either of the two must be a single signal.")
-
-    # Collect the start and stop times of each analytic signal
-    start = [elem.t_start for elem in hilbert_transform]
-    stop = [elem.t_stop for elem in hilbert_transform]
-
-    result_phases = []
-    result_amps = []
-    result_times = []
-
-    # Step through each signal
-    for spiketrain_i, spiketrain in enumerate(spiketrains):
-        # Check which hilbert_transform AnalogSignal to look at - if there is
-        # only one then all spike trains relate to this one, otherwise the two
-        # lists of spike trains and phases are matched up
-        if num_phase > 1:
-            phase_i = spiketrain_i
-        else:
-            phase_i = 0
-
-        # Take only spikes which lie directly within the signal segment -
-        # ignore spikes sitting on the last sample
-        sttimeind = np.where(np.logical_and(
-            spiketrain >= start[phase_i], spiketrain < stop[phase_i]))[0]
-
-        # Find index into signal for each spike
-        ind_at_spike = np.round(
-            (spiketrain[sttimeind] - hilbert_transform[phase_i].t_start) /
-            hilbert_transform[phase_i].sampling_period). \
-            simplified.magnitude.astype(int)
-
-        # Extract times for speed reasons
-        times = hilbert_transform[phase_i].times
-
-        # Append new list to the results for this spiketrain
-        result_phases.append([])
-        result_amps.append([])
-        result_times.append([])
-
-        # Step through all spikes
-        for spike_i, ind_at_spike_j in enumerate(ind_at_spike):
-            # Difference vector between actual spike time and sample point,
-            # positive if spike time is later than sample point
-            dv = spiketrain[sttimeind[spike_i]] - times[ind_at_spike_j]
-
-            # Make sure ind_at_spike is to the left of the spike time
-            if dv < 0 and ind_at_spike_j > 0:
-                ind_at_spike_j = ind_at_spike_j - 1
-
-            if interpolate:
-                # Get relative spike occurrence between the two closest signal
-                # sample points
-                # if z->0 spike is more to the left sample
-                # if z->1 more to the right sample
-                z = (spiketrain[sttimeind[spike_i]] - times[ind_at_spike_j]) /\
-                    hilbert_transform[phase_i].sampling_period
-
-                # Save hilbert_transform (interpolate on circle)
-                p1 = np.angle(hilbert_transform[phase_i][ind_at_spike_j])
-                p2 = np.angle(hilbert_transform[phase_i][ind_at_spike_j + 1])
-                result_phases[spiketrain_i].append(
-                    np.angle(
-                        (1 - z) * np.exp(1j * p1) +
-                        z * np.exp(1j * p2)))
-
-                # Save amplitude
-                result_amps[spiketrain_i].append(
-                    (1 - z) * np.abs(
-                        hilbert_transform[phase_i][ind_at_spike_j]) +
-                    z * np.abs(hilbert_transform[phase_i][ind_at_spike_j + 1]))
-            else:
-                p1 = np.angle(hilbert_transform[phase_i][ind_at_spike_j])
-                result_phases[spiketrain_i].append(p1)
-
-                # Save amplitude
-                result_amps[spiketrain_i].append(
-                    np.abs(hilbert_transform[phase_i][ind_at_spike_j]))
-
-            # Save time
-            result_times[spiketrain_i].append(spiketrain[sttimeind[spike_i]])
-
-    # Convert outputs to arrays
-    for i, entry in enumerate(result_phases):
-        result_phases[i] = np.array(entry).flatten()
-    for i, entry in enumerate(result_amps):
-        result_amps[i] = pq.Quantity(entry, units=entry[0].units).flatten()
-    for i, entry in enumerate(result_times):
-        result_times[i] = pq.Quantity(entry, units=entry[0].units).flatten()
-
-    return result_phases, result_amps, result_times
+/annex/objects/MD5-s7111--105ad22cd3c565551d7c23d0c16a9e3d
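The interpolation branch above cannot average the two raw angles directly: the
arithmetic mean of phases just below and just above the +/- pi discontinuity
would wrongly come out near 0. Interpolating the unit vectors and taking the
angle of the result avoids this. A self-contained sketch of the same idea:

    import numpy as np

    def interpolate_phase(p1, p2, z):
        """Interpolate between two angles (rad) on the unit circle.

        z=0 returns p1, z=1 returns p2.
        """
        return np.angle((1 - z) * np.exp(1j * p1) + z * np.exp(1j * p2))

    # Naive linear interpolation would give ~0 here; the circular version
    # stays at the +/- pi discontinuity
    print(interpolate_phase(np.pi - 0.01, -np.pi + 0.01, 0.5))  # ~ +/- pi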

+ 1 - 951
code/elephant/elephant/signal_processing.py

@@ -1,951 +1 @@
-# -*- coding: utf-8 -*-
-"""
-Basic processing procedures for analog signals (e.g., performing a z-score of a
-signal, or filtering a signal).
-
-:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
-:license: Modified BSD, see LICENSE.txt for details.
-"""
-
-from __future__ import division, print_function, unicode_literals
-
-import neo
-import numpy as np
-import quantities as pq
-import scipy.signal
-
-from elephant.utils import deprecated_alias
-
-__all__ = [
-    "zscore",
-    "cross_correlation_function",
-    "butter",
-    "wavelet_transform",
-    "hilbert",
-    "rauc",
-    "derivative"
-]
-
-
-def zscore(signal, inplace=True):
-    r"""
-    Apply a z-score operation to one or several `neo.AnalogSignal` objects.
-
-    The z-score operation subtracts the mean :math:`\mu` of the signal, and
-    divides by its standard deviation :math:`\sigma`:
-
-    .. math::
-         Z(x(t)) = \frac{x(t)-\mu}{\sigma}
-
-    If a `neo.AnalogSignal` object containing multiple signals is provided,
-    the z-transform is always calculated for each signal individually.
-
-    If a list of `neo.AnalogSignal` objects is supplied, the mean and standard
-    deviation are calculated across all objects of the list. Thus, all list
-    elements are z-transformed by the same values of :math:`\mu` and
-    :math:`\sigma`. For a `neo.AnalogSignal` that contains multiple signals,
-    each signal of the array is treated separately across list elements.
-    Therefore, the number of signals must be identical for each
-    `neo.AnalogSignal` object of the list.
-
-    Parameters
-    ----------
-    signal : neo.AnalogSignal or list of neo.AnalogSignal
-        Signals for which to calculate the z-score.
-    inplace : bool, optional
-        If True, the contents of the input `signal` is replaced by the
-        z-transformed signal.
-        If False, a copy of the original `signal` is returned.
-        Default: True.
-
-    Returns
-    -------
-    signal_ztransformed : neo.AnalogSignal or list of neo.AnalogSignal
-        The output format matches the input format: for each input
-        `neo.AnalogSignal`, a corresponding `neo.AnalogSignal` is returned,
-        containing the z-transformed signal with dimensionless unit.
-
-    Notes
-    -----
-    You may supply a list of `neo.AnalogSignal` objects, where each object in
-    the list contains the data of one trial of the experiment, and each signal
-    of the `neo.AnalogSignal` corresponds to the recordings from one specific
-    electrode in a particular trial. In this scenario, you will z-transform
-    the signal of each electrode separately, but transform all trials of a
-    given electrode in the same way.
-
-    Examples
-    --------
-    Z-transform a single `neo.AnalogSignal`, containing only a single signal.
-
-    >>> import neo
-    >>> import numpy as np
-    >>> import quantities as pq
-    ...
-    >>> a = neo.AnalogSignal(
-    ...       np.array([1, 2, 3, 4, 5, 6]).reshape(-1,1) * pq.mV,
-    ...       t_start=0*pq.s, sampling_rate=1000*pq.Hz)
-    >>> zscore(a).as_quantity()
-    [[-1.46385011]
-     [-0.87831007]
-     [-0.29277002]
-     [ 0.29277002]
-     [ 0.87831007]
-     [ 1.46385011]] dimensionless
-
-    Z-transform a single `neo.AnalogSignal` containing multiple signals.
-
-    >>> b = neo.AnalogSignal(
-    ...       np.transpose([[1, 2, 3, 4, 5, 6],
-    ...                     [11, 12, 13, 14, 15, 16]]) * pq.mV,
-    ...       t_start=0*pq.s, sampling_rate=1000*pq.Hz)
-    >>> zscore(b).as_quantity()
-    [[-1.46385011 -1.46385011]
-     [-0.87831007 -0.87831007]
-     [-0.29277002 -0.29277002]
-     [ 0.29277002  0.29277002]
-     [ 0.87831007  0.87831007]
-     [ 1.46385011  1.46385011]] dimensionless
-
-    Z-transform a list of `neo.AnalogSignal`, each one containing more than
-    one signal:
-
-    >>> c = neo.AnalogSignal(
-    ...       np.transpose([[21, 22, 23, 24, 25, 26],
-    ...                     [31, 32, 33, 34, 35, 36]]) * pq.mV,
-    ...       t_start=0*pq.s, sampling_rate=1000*pq.Hz)
-    >>> zscore([b, c])
-    [<AnalogSignal(array([[-1.11669108, -1.08361877],
-       [-1.0672076 , -1.04878252],
-       [-1.01772411, -1.01394628],
-       [-0.96824063, -0.97911003],
-       [-0.91875714, -0.94427378],
-       [-0.86927366, -0.90943753]]) * dimensionless, [0.0 s, 0.006 s],
-       sampling rate: 1000.0 Hz)>,
-       <AnalogSignal(array([[ 0.78170952,  0.84779261],
-       [ 0.86621866,  0.90728682],
-       [ 0.9507278 ,  0.96678104],
-       [ 1.03523694,  1.02627526],
-       [ 1.11974608,  1.08576948],
-       [ 1.20425521,  1.1452637 ]]) * dimensionless, [0.0 s, 0.006 s],
-       sampling rate: 1000.0 Hz)>]
-
-    """
-    # Transform input to a list
-    if not isinstance(signal, list):
-        signal = [signal]
-
-    # Calculate mean and standard deviation
-    signal_stacked = np.vstack(signal)
-    m = np.mean(signal_stacked, axis=0)
-    s = np.std(signal_stacked, axis=0)
-
-    signal_ztransformed = []
-    for sig in signal:
-        sig_normalized = sig.magnitude - m.magnitude
-        sig_normalized = np.divide(sig_normalized, s.magnitude,
-                                   out=np.zeros_like(sig_normalized),
-                                   where=s.magnitude != 0)
-        if inplace:
-            sig[:] = pq.Quantity(sig_normalized, units=sig.units)
-            sig_normalized = sig
-        else:
-            sig_normalized = sig.duplicate_with_new_data(sig_normalized)
-            # todo: use a flag once this issue is fixed:
-            #      https://github.com/NeuralEnsemble/python-neo/issues/752
-            sig_normalized.array_annotate(**sig.array_annotations)
-        sig_dimless = sig_normalized / sig.units
-        signal_ztransformed.append(sig_dimless)
-
-    # Return single object, or list of objects
-    if len(signal_ztransformed) == 1:
-        signal_ztransformed = signal_ztransformed[0]
-    return signal_ztransformed
-
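One detail worth keeping from the removed `zscore` is the guarded division: a
channel with zero standard deviation would otherwise produce NaN or Inf
columns. A NumPy-only sketch of that guard:

    import numpy as np

    x = np.array([[1., 5.], [2., 5.], [3., 5.]])  # second channel is constant
    m, s = x.mean(axis=0), x.std(axis=0)

    # Divide only where s != 0; constant channels come out as all zeros
    z = np.divide(x - m, s, out=np.zeros_like(x), where=s != 0)
    print(z[:, 1])  # [0. 0. 0.]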
-
-@deprecated_alias(ch_pairs='channel_pairs', nlags='n_lags',
-                  env='hilbert_envelope')
-def cross_correlation_function(signal, channel_pairs, hilbert_envelope=False,
-                               n_lags=None, scaleopt='unbiased'):
-    r"""
-    Computes unbiased estimator of the cross-correlation function.
-
-    The calculations are based on [1]_:
-
-    .. math::
-
-             R(\tau) = \frac{1}{N-|\tau|} R'(\tau)
-
-    where :math:`R'(\tau) = \left<x(t)y(t+\tau)\right>` in a pairwise
-    manner, i.e.:
-
-    `signal[channel_pairs[0,0]]` vs `signal[channel_pairs[0,1]]`,
-
-    `signal[channel_pairs[1,0]]` vs `signal[channel_pairs[1,1]]`,
-
-    and so on.
-
-    The input time series are z-scored beforehand. `scaleopt` controls the
-    choice of :math:`R_{xy}(\tau)` normalizer. Alternatively, returns the
-    Hilbert envelope of :math:`R_{xy}(\tau)`, which is useful to determine the
-    correlation length of oscillatory signals.
-
-    Parameters
-    ----------
-    signal : (nt, nch) neo.AnalogSignal
-        Signal with `nt` samples that contains `nch` LFP channels.
-    channel_pairs : list or (n, 2) np.ndarray
-        List with `n` channel pairs for which to compute cross-correlation.
-        Each element of the list must contain 2 channel indices.
-        If `np.ndarray`, the second axis must have dimension 2.
-    hilbert_envelope : bool, optional
-        If True, returns the Hilbert envelope of cross-correlation function
-        result.
-        Default: False.
-    n_lags : int, optional
-        Defines the number of lags for the cross-correlation function; must
-        be a positive integer. The number of samples in the output is
-        `2*n_lags+1`.
-        If None, the number of samples of the output is equal to the number of
-        samples of the input signal (namely `nt`).
-        Default: None.
-    scaleopt : {'none', 'biased', 'unbiased', 'normalized', 'coeff'}, optional
-        Normalization option, equivalent to matlab `xcorr(..., scaleopt)`.
-        Specified as one of the following.
-
-        * 'none': raw, unscaled cross-correlation
-
-        .. math::
-            R_{xy}(\tau)
-
-        * 'biased': biased estimate of the cross-correlation:
-
-        .. math::
-            R_{xy,biased}(\tau) = \frac{1}{N} R_{xy}(\tau)
-
-        * 'unbiased': unbiased estimate of the cross-correlation:
-
-        .. math::
-            R_{xy,unbiased}(\tau) = \frac{1}{N-\tau} R_{xy}(\tau)
-
-        * 'normalized' or 'coeff': normalizes the sequence so that the
-          autocorrelations at zero lag equal 1:
-
-        .. math::
-            R_{xy,coeff}(\tau) = \frac{1}{\sqrt{R_{xx}(0) R_{yy}(0)}}
-                                 R_{xy}(\tau)
-
-        Default: 'unbiased'.
-
-    Returns
-    -------
-    cross_corr : neo.AnalogSignal
-        Shape: `[2*n_lags+1, n]`
-        Pairwise cross-correlation functions for channel pairs given by
-        `channel_pairs`. If `hilbert_envelope` is True, the output is the
-        Hilbert envelope of the pairwise cross-correlation function. This is
-        helpful to compute the correlation length for oscillating
-        cross-correlation functions.
-
-    Raises
-    ------
-    ValueError
-        If input `signal` is not a `neo.AnalogSignal`.
-
-        If `channel_pairs` is not a list of channel pair indices with shape
-        `(n,2)`.
-
-        If `hilbert_envelope` is not a boolean.
-
-        If `n_lags` is not a positive integer.
-
-        If `scaleopt` is not one of the predefined above keywords.
-
-    References
-    ----------
-    .. [1] Stoica, P., & Moses, R. (2005). Spectral Analysis of Signals.
-       Prentice Hall. Retrieved from http://user.it.uu.se/~ps/SAS-new.pdf,
-       Eq. 2.2.3.
-
-    Examples
-    --------
-    >>> import neo
-    >>> import quantities as pq
-    >>> import matplotlib.pyplot as plt
-    ...
-    >>> dt = 0.02
-    >>> N = 2018
-    >>> f = 0.5
-    >>> t = np.arange(N)*dt
-    >>> x = np.zeros((N,2))
-    >>> x[:,0] = 0.2 * np.sin(2.*np.pi*f*t)
-    >>> x[:,1] = 5.3 * np.cos(2.*np.pi*f*t)
-    ...
-    >>> # Generate neo.AnalogSignals from x and find cross-correlation
-    >>> signal = neo.AnalogSignal(x, units='mV', t_start=0.*pq.ms,
-    ...     sampling_rate=1/dt*pq.Hz, dtype=float)
-    >>> rho = cross_correlation_function(signal, [0,1], n_lags=150)
-    >>> env = cross_correlation_function(signal, [0,1], n_lags=150,
-    ...     hilbert_envelope=True)
-    ...
-    >>> plt.plot(rho.times, rho)
-    >>> plt.plot(env.times, env) # should be equal to one
-    >>> plt.show()
-
-    """
-
-    # Make channel_pairs a 2D array
-    pairs = np.asarray(channel_pairs)
-    if pairs.ndim == 1:
-        pairs = np.expand_dims(pairs, axis=0)
-
-    # Check input
-    if not isinstance(signal, neo.AnalogSignal):
-        raise ValueError('Input signal must be of type neo.AnalogSignal')
-    if pairs.shape[1] != 2:
-        raise ValueError("'channel_pairs' is not a list of channel pair "
-                         "indices. Cannot define pairs for cross-correlation.")
-    if not isinstance(hilbert_envelope, bool):
-        raise ValueError("'hilbert_envelope' must be a boolean value")
-    if n_lags is not None:
-        if not isinstance(n_lags, int) or n_lags <= 0:
-            raise ValueError('n_lags must be a positive integer')
-
-    # z-score analog signal and store channel time series in different arrays
-    # Cross-correlation will be calculated between xsig and ysig
-    z_transformed = zscore(signal, inplace=False).magnitude
-    # transpose (nch, xy, nt) -> (xy, nt, nch)
-    xsig, ysig = np.transpose(z_transformed.T[pairs], (1, 2, 0))
-
-    # Define vector of lags tau
-    nt, nch = xsig.shape
-    tau = np.arange(nt) - nt // 2
-
-    # Calculate cross-correlation by taking Fourier transform of signal,
-    # multiply in Fourier space, and transform back. Correct for bias due
-    # to zero-padding
-    xcorr = scipy.signal.fftconvolve(xsig, ysig[::-1], mode='same', axes=0)
-    if scaleopt == 'biased':
-        xcorr /= nt
-    elif scaleopt == 'unbiased':
-        normalizer = np.expand_dims(nt - np.abs(tau), axis=1)
-        xcorr /= normalizer
-    elif scaleopt in ('normalized', 'coeff'):
-        normalizer = np.sqrt((xsig ** 2).sum(axis=0) * (ysig ** 2).sum(axis=0))
-        xcorr /= normalizer
-    elif scaleopt != 'none':
-        raise ValueError("Invalid scaleopt mode: '{}'".format(scaleopt))
-
-    # Calculate envelope of cross-correlation function with Hilbert transform.
-    # This is useful for transient oscillatory signals.
-    if hilbert_envelope:
-        xcorr = np.abs(scipy.signal.hilbert(xcorr, axis=0))
-
-    # Cut off lags outside the desired range
-    if n_lags is not None:
-        tau0 = np.argwhere(tau == 0).item()
-        xcorr = xcorr[tau0 - n_lags: tau0 + n_lags + 1, :]
-
-    # Return neo.AnalogSignal
-    cross_corr = neo.AnalogSignal(xcorr,
-                                  units='',
-                                  t_start=tau[0] * signal.sampling_period,
-                                  t_stop=tau[-1] * signal.sampling_period,
-                                  sampling_rate=signal.sampling_rate,
-                                  dtype=float)
-    return cross_corr
-
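The FFT-based estimator above recovers imposed lags between channels. A small
NumPy/SciPy sketch of the 'unbiased' branch on two 1-D arrays, with the lag
axis built the same way as in the removed code:

    import numpy as np
    import scipy.signal

    rng = np.random.default_rng(0)
    nt = 256
    x = rng.standard_normal(nt)
    y = np.roll(x, 5)  # y is x delayed by 5 samples

    tau = np.arange(nt) - nt // 2
    xcorr = scipy.signal.fftconvolve(x, y[::-1], mode='same')
    xcorr /= (nt - np.abs(tau))  # 'unbiased' normalizer

    # The 5-sample shift shows up as the peak lag (sign depends on convention)
    print(abs(tau[np.argmax(xcorr)]))  # expected 5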
-
-@deprecated_alias(highpass_freq='highpass_frequency',
-                  lowpass_freq='lowpass_frequency',
-                  fs='sampling_frequency')
-def butter(signal, highpass_frequency=None, lowpass_frequency=None, order=4,
-           filter_function='filtfilt', sampling_frequency=1.0, axis=-1):
-    """
-    Butterworth filtering function for `neo.AnalogSignal`.
-
-    Filter type is determined according to how values of `highpass_frequency`
-    and `lowpass_frequency` are given (see "Parameters" section for details).
-
-    Parameters
-    ----------
-    signal : neo.AnalogSignal or pq.Quantity or np.ndarray
-        Time series data to be filtered.
-        If `pq.Quantity` or `np.ndarray`, the sampling frequency should be
-        given through the keyword argument `sampling_frequency`.
-    highpass_frequency : pq.Quantity or float, optional
-        High-pass cut-off frequency. If `float`, the given value is taken as
-        frequency in Hz.
-        Default: None.
-    lowpass_frequency : pq.Quantity or float, optional
-        Low-pass cut-off frequency. If `float`, the given value is taken as
-        frequency in Hz.
-        Filter type is determined depending on the values of
-        `lowpass_frequency` and `highpass_frequency`:
-
-        * `highpass_frequency` only (`lowpass_frequency` is None):
-          highpass filter
-
-        * `lowpass_frequency` only (`highpass_frequency` is None):
-          lowpass filter
-
-        * `highpass_frequency` < `lowpass_frequency`: bandpass filter
-
-        * `highpass_frequency` > `lowpass_frequency`: bandstop filter
-
-        Default: None.
-    order : int, optional
-        Order of the Butterworth filter.
-        Default: 4.
-    filter_function : {'filtfilt', 'lfilter', 'sosfiltfilt'}, optional
-        Filtering function to be used. Available filters:
-
-        * 'filtfilt': `scipy.signal.filtfilt`;
-
-        * 'lfilter': `scipy.signal.lfilter`;
-
-        * 'sosfiltfilt': `scipy.signal.sosfiltfilt`.
-
-        In most applications 'filtfilt' should be used, because it doesn't
-        bring about phase shift due to filtering. For numerically stable
-        filtering, in particular higher order filters, use 'sosfiltfilt'
-        (see [1]_).
-        Default: 'filtfilt'.
-    sampling_frequency : pq.Quantity or float, optional
-        The sampling frequency of the input time series. When given as
-        `float`, its value is taken as frequency in Hz. When `signal` is given
-        as `neo.AnalogSignal`, its attribute is used to specify the sampling
-        frequency and this parameter is ignored.
-        Default: 1.0.
-    axis : int, optional
-        Axis along which filter is applied.
-        Default: last axis (-1).
-
-    Returns
-    -------
-    filtered_signal : neo.AnalogSignal or pq.Quantity or np.ndarray
-        Filtered input data. The shape and type is identical to those of the
-        input `signal`.
-
-    Raises
-    ------
-    ValueError
-        If `filter_function` is not one of 'lfilter', 'filtfilt',
-        or 'sosfiltfilt'.
-
-        If both `highpass_frequency` and `lowpass_frequency` are None.
-
-    References
-    ----------
-    .. [1] https://github.com/NeuralEnsemble/elephant/issues/220
-
-    """
-    available_filters = 'lfilter', 'filtfilt', 'sosfiltfilt'
-    if filter_function not in available_filters:
-        raise ValueError("Invalid `filter_function`: {filter_function}. "
-                         "Available filters: {available_filters}".format(
-                             filter_function=filter_function,
-                             available_filters=available_filters))
-    # design filter
-    if hasattr(signal, 'sampling_rate'):
-        sampling_frequency = signal.sampling_rate.rescale(pq.Hz).magnitude
-    if isinstance(highpass_frequency, pq.quantity.Quantity):
-        highpass_frequency = highpass_frequency.rescale(pq.Hz).magnitude
-    if isinstance(lowpass_frequency, pq.quantity.Quantity):
-        lowpass_frequency = lowpass_frequency.rescale(pq.Hz).magnitude
-    Fn = sampling_frequency / 2.
-    # filter type is determined according to the values of cut-off
-    # frequencies
-    if lowpass_frequency and highpass_frequency:
-        if highpass_frequency < lowpass_frequency:
-            Wn = (highpass_frequency / Fn, lowpass_frequency / Fn)
-            btype = 'bandpass'
-        else:
-            Wn = (lowpass_frequency / Fn, highpass_frequency / Fn)
-            btype = 'bandstop'
-    elif lowpass_frequency:
-        Wn = lowpass_frequency / Fn
-        btype = 'lowpass'
-    elif highpass_frequency:
-        Wn = highpass_frequency / Fn
-        btype = 'highpass'
-    else:
-        raise ValueError(
-            "Either highpass_frequency or lowpass_frequency must be given"
-        )
-    if filter_function == 'sosfiltfilt':
-        output = 'sos'
-    else:
-        output = 'ba'
-    designed_filter = scipy.signal.butter(order, Wn, btype=btype,
-                                          output=output)
-
-    # When the input is AnalogSignal, the axis for time index (i.e. the
-    # first axis) needs to be rolled to the last
-    data = np.asarray(signal)
-    if isinstance(signal, neo.AnalogSignal):
-        data = np.rollaxis(data, 0, len(data.shape))
-
-    # apply filter
-    if filter_function == 'lfilter':
-        b, a = designed_filter
-        filtered_data = scipy.signal.lfilter(b=b, a=a, x=data, axis=axis)
-    elif filter_function == 'filtfilt':
-        b, a = designed_filter
-        filtered_data = scipy.signal.filtfilt(b=b, a=a, x=data, axis=axis)
-    else:
-        filtered_data = scipy.signal.sosfiltfilt(sos=designed_filter,
-                                                 x=data, axis=axis)
-
-    if isinstance(signal, neo.AnalogSignal):
-        filtered_data = np.rollaxis(filtered_data, -1, 0)
-        signal_out = signal.duplicate_with_new_data(filtered_data)
-        # todo: use a flag once this issue is fixed:
-        #      https://github.com/NeuralEnsemble/python-neo/issues/752
-        signal_out.array_annotate(**signal.array_annotations)
-        return signal_out
-    elif isinstance(signal, pq.quantity.Quantity):
-        return filtered_data * signal.units
-    else:
-        return filtered_data
-
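The cut-off logic above reduces to a few lines of SciPy once the filter type
is known. A minimal sketch of the bandpass branch using the numerically
stabler SOS path recommended in the docstring:

    import numpy as np
    import scipy.signal

    fs = 1000.0  # sampling frequency in Hz
    t = np.arange(0, 1, 1 / fs)
    x = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 200 * t)

    # highpass_frequency (5 Hz) < lowpass_frequency (30 Hz) -> bandpass
    wn = (5 / (fs / 2), 30 / (fs / 2))  # normalize by the Nyquist frequency
    sos = scipy.signal.butter(4, wn, btype='bandpass', output='sos')
    filtered = scipy.signal.sosfiltfilt(sos, x)  # zero-phase filtering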
-
-@deprecated_alias(nco='n_cycles', freq='frequency', fs='sampling_frequency')
-def wavelet_transform(signal, frequency, n_cycles=6.0, sampling_frequency=1.0,
-                      zero_padding=True):
-    r"""
-    Compute the wavelet transform of a given signal with Morlet mother
-    wavelet.
-
-    The parametrization of the wavelet is based on [1]_.
-
-    Parameters
-    ----------
-    signal : (Nt, Nch) neo.AnalogSignal or np.ndarray or list
-        Time series data to be wavelet-transformed. When multi-dimensional
-        `np.ndarray` or list is given, the time axis must be the last
-        dimension. If `neo.AnalogSignal`, `Nt` is the number of time points
-        and `Nch` is the number of channels.
-    frequency : float or list of float
-        Center frequency of the Morlet wavelet in Hz. Multiple center
-        frequencies can be given as a list, in which case the function
-        computes the wavelet transforms for all the given frequencies at once.
-    n_cycles : float, optional
-        Size of the mother wavelet (approximate number of oscillation cycles
-        within a wavelet). Corresponds to :math:`nco` in the paper [1]_.
-        A larger `n_cycles` value leads to a higher frequency resolution and a
-        lower temporal resolution, and vice versa.
-        Typically used values are in a range of 3–8, but one should be cautious
-        when using a value smaller than ~ 6, in which case the admissibility of
-        the wavelet is not ensured (cf. [2]_).
-        Default: 6.0.
-    sampling_frequency : float, optional
-        Sampling rate of the input data in Hz.
-        When `signal` is given as a `neo.AnalogSignal`, the sampling frequency
-        is taken from its attribute and this parameter is ignored.
-        Default: 1.0.
-    zero_padding : bool, optional
-        Specifies whether the data length is extended to the least power of
-        2 greater than the original length, by padding zeros to the tail, for
-        speeding up the computation.
-        If True, the extended part is cut out from the final result before
-        being returned, so that the output has the same length as the input.
-        Default: True.
-
-    Returns
-    -------
-    signal_wt : np.ndarray
-        Wavelet transform of the input data. When `frequency` was given as a
-        list, the way how the wavelet transforms for different frequencies are
-        returned depends on the input type:
-
-        * when the input was a `neo.AnalogSignal`, the returned array has
-          shape (`Nt`, `Nch`, `Nf`), where `Nf` = `len(frequency)`, such that
-          the last dimension indexes the frequencies;
-
-        * when the input was a `np.ndarray` or list of shape
-          (`a`, `b`, ..., `c`, `Nt`), the returned array has a shape
-          (`a`, `b`, ..., `c`, `Nf`, `Nt`), such that the second last
-          dimension indexes the frequencies.
-
-        To summarize, `signal_wt.ndim` = `signal.ndim` + 1, with the
-        additional dimension in the last axis (for `neo.AnalogSignal` input)
-        or the second last axis (`np.ndarray` or list input) indexing the
-        frequencies.
-
-    Raises
-    ------
-    ValueError
-        If `frequency` (or one of the values in `frequency` when it is a list)
-        is greater than or equal to half of the `sampling_frequency`.
-
-        If `n_cycles` is not positive.
-
-    Notes
-    -----
-    `n_cycles` is related to the wavelet number :math:`w` as
-    :math:`w \sim 2 \pi \frac{n_{\text{cycles}}}{6}`, as defined in [1]_.
-
-    References
-    ----------
-    .. [1] M. Le Van Quyen, J. Foucher, J. Lachaux, E. Rodriguez, A. Lutz,
-           J. Martinerie, & F.J. Varela, "Comparison of Hilbert transform and
-           wavelet methods for the analysis of neuronal synchrony," J Neurosci
-           Meth, vol. 111, pp. 83–98, 2001.
-    .. [2] M. Farge, "Wavelet Transforms and their Applications to
-           Turbulence," Annu Rev Fluid Mech, vol. 24, pp. 395–458, 1992.
-
-    """
-    def _morlet_wavelet_ft(freq, n_cycles, fs, n):
-        # Generate the Fourier transform of Morlet wavelet as defined
-        # in Le van Quyen et al. J Neurosci Meth 111:83-98 (2001).
-        sigma = n_cycles / (6. * freq)
-        freqs = np.fft.fftfreq(n, 1.0 / fs)
-        heaviside = np.array(freqs > 0., dtype=float)
-        ft_real = np.sqrt(2 * np.pi * freq) * sigma * np.exp(
-            -2 * (np.pi * sigma * (freqs - freq)) ** 2) * heaviside * fs
-        ft_imag = np.zeros_like(ft_real)
-        return ft_real + 1.0j * ft_imag
-
-    data = np.asarray(signal)
-    # When the input is AnalogSignal, the axis for time index (i.e. the
-    # first axis) needs to be rolled to the last
-    if isinstance(signal, neo.AnalogSignal):
-        data = np.rollaxis(data, 0, data.ndim)
-
-    # When the input is AnalogSignal, use its attribute to specify the
-    # sampling frequency
-    if hasattr(signal, 'sampling_rate'):
-        sampling_frequency = signal.sampling_rate
-    if isinstance(sampling_frequency, pq.quantity.Quantity):
-        sampling_frequency = sampling_frequency.rescale('Hz').magnitude
-
-    if isinstance(frequency, (list, tuple, np.ndarray)):
-        freqs = np.asarray(frequency)
-    else:
-        freqs = np.array([frequency, ])
-    if isinstance(freqs[0], pq.quantity.Quantity):
-        freqs = [f.rescale('Hz').magnitude for f in freqs]
-
-    # check whether the given central frequencies are less than the
-    # Nyquist frequency of the signal
-    if np.any(freqs >= sampling_frequency / 2):
-        raise ValueError("'frequency' elements must be less than the half of "
-                         "the 'sampling_frequency' ({}) Hz"
-                         .format(sampling_frequency))
-
-    # check if n_cycles is positive
-    if n_cycles <= 0:
-        raise ValueError("`n_cycles` must be positive")
-
-    n_orig = data.shape[-1]
-    if zero_padding:
-        n = 2 ** (int(np.log2(n_orig)) + 1)
-    else:
-        n = n_orig
-
-    # generate Morlet wavelets (in the frequency domain)
-    wavelet_fts = np.empty([len(freqs), n], dtype=complex)
-    for i, f in enumerate(freqs):
-        wavelet_fts[i] = _morlet_wavelet_ft(f, n_cycles, sampling_frequency, n)
-
-    # perform wavelet transform by convoluting the signal with the wavelets
-    if data.ndim == 1:
-        data = np.expand_dims(data, 0)
-    data = np.expand_dims(data, data.ndim - 1)
-    data = np.fft.ifft(np.fft.fft(data, n) * wavelet_fts)
-    signal_wt = data[..., 0:n_orig]
-
-    # reshape the result array according to the input
-    if isinstance(signal, neo.AnalogSignal):
-        signal_wt = np.rollaxis(signal_wt, -1)
-        if not isinstance(frequency, (list, tuple, np.ndarray)):
-            signal_wt = signal_wt[..., 0]
-    else:
-        if signal.ndim == 1:
-            signal_wt = signal_wt[0]
-        if not isinstance(frequency, (list, tuple, np.ndarray)):
-            signal_wt = signal_wt[..., 0, :]
-
-    return signal_wt
-
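A quick sanity check of the Morlet parametrization above is to transform a
pure sinusoid: at the matching center frequency the amplitude envelope should
be flat away from the edges. A sketch, assuming the removed
`wavelet_transform` from the pre-0.9.0 `elephant.signal_processing` module:

    import numpy as np
    from elephant.signal_processing import wavelet_transform

    fs = 1000.0
    t = np.arange(0, 2, 1 / fs)
    x = np.sin(2 * np.pi * 20 * t)

    wt = wavelet_transform(x, frequency=20.0, sampling_frequency=fs)
    envelope = np.abs(wt)

    # Away from the edges the envelope of a pure tone is nearly constant
    mid = envelope[500:1500]
    print(mid.std() / mid.mean() < 0.01)  # expected True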
-
-@deprecated_alias(N='padding')
-def hilbert(signal, padding='nextpow'):
-    """
-    Apply a Hilbert transform to a `neo.AnalogSignal` object in order to
-    obtain its (complex) analytic signal.
-
-    The time series of the instantaneous angle and amplitude can be obtained
-    as the angle (`np.angle` function) and absolute value (`np.abs` function)
-    of the complex analytic signal, respectively.
-
-    By default, the function will zero-pad the signal to a length
-    corresponding to the next higher power of 2. This will provide higher
-    computational efficiency at the expense of memory. In addition, this
-    circumvents a situation where, for some specific choices of the length of
-    the input, `scipy.signal.hilbert` function will not terminate.
-
-    Parameters
-    ----------
-    signal : neo.AnalogSignal
-        Signal(s) to transform.
-    padding : int, {'none', 'nextpow'}, or None, optional
-        Defines whether the signal is zero-padded.
-        The `padding` argument corresponds to `N` in
-        `scipy.signal.hilbert(signal, N=padding)` function.
-        If 'none' or None, no padding.
-        If 'nextpow', zero-pad to the next length that is a power of 2.
-        If it is an `int`, directly specify the length to zero-pad to
-        (indicates the number of Fourier components).
-        Default: 'nextpow'.
-
-    Returns
-    -------
-    neo.AnalogSignal
-        Contains the complex analytic signal(s) corresponding to the input
-        `signal`. The unit of the returned `neo.AnalogSignal` is
-        dimensionless.
-
-    Raises
-    ------
-    ValueError
-        If `padding` is not an integer or neither 'nextpow' nor 'none' (None).
-
-    Examples
-    --------
-    Create a sine signal at 5 Hz with increasing amplitude and calculate the
-    instantaneous phases:
-
-    >>> import numpy as np
-    >>> import quantities as pq
-    >>> import neo
-    >>> import matplotlib.pyplot as plt
-    ...
-    >>> t = np.arange(0, 5000) * pq.ms
-    >>> f = 5. * pq.Hz
-    >>> a = neo.AnalogSignal(
-    ...       np.array(
-    ...           (1 + t.magnitude / t[-1].magnitude) * np.sin(
-    ...               2. * np.pi * f * t.rescale(pq.s))).reshape(
-    ...                   (-1,1)) * pq.mV,
-    ...       t_start=0*pq.s,
-    ...       sampling_rate=1000*pq.Hz)
-    ...
-    >>> analytic_signal = hilbert(a, padding='nextpow')
-    >>> angles = np.angle(analytic_signal)
-    >>> amplitudes = np.abs(analytic_signal)
-    >>> print(angles)
-    [[-1.57079633]
-     [-1.51334228]
-     [-1.46047675]
-     ...,
-     [-1.73112977]
-     [-1.68211683]
-     [-1.62879501]]
-    >>> plt.plot(t, angles)
-
-    """
-    # Length of input signals
-    n_org = signal.shape[0]
-
-    # Determine the length to which the signal will be zero-padded
-    if isinstance(padding, int):
-        # User defined padding
-        n = padding
-    elif padding == 'nextpow':
-        # To speed up calculation of the Hilbert transform, make sure we change
-        # the signal to be of a length that is a power of two. Failure to do so
-        # results in computations of certain signal lengths to not finish (or
-        # finish in absurd time). This might be a bug in scipy (0.16), e.g.,
-        # the following code will not terminate for this value of k:
-        #
-        # import numpy
-        # import scipy.signal
-        # k=679346
-        # t = np.arange(0, k) / 1000.
-        # a = (1 + t / t[-1]) * np.sin(2 * np.pi * 5 * t)
-        # analytic_signal = scipy.signal.hilbert(a)
-        #
-        # For this reason, nextpow is the default setting for now.
-
-        n = 2 ** (int(np.log2(n_org - 1)) + 1)
-    elif padding == 'none' or padding is None:
-        # No padding
-        n = n_org
-    else:
-        raise ValueError("Invalid padding '{}'.".format(padding))
-
-    output = signal.duplicate_with_new_data(
-        scipy.signal.hilbert(signal.magnitude, N=n, axis=0)[:n_org])
-    # todo: use a flag once this issue is fixed:
-    #      https://github.com/NeuralEnsemble/python-neo/issues/752
-    output.array_annotate(**signal.array_annotations)
-    return output / output.units
-
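The 'nextpow' branch above computes the smallest power of two that is at
least the signal length. A standalone check of that exact expression (valid
for n >= 2):

    import numpy as np

    def nextpow2(n):
        # Same expression as the removed code
        return 2 ** (int(np.log2(n - 1)) + 1)

    print([nextpow2(n) for n in (2, 3, 1000, 1024, 1025)])
    # [2, 4, 1024, 1024, 2048]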
-
-def rauc(signal, baseline=None, bin_duration=None, t_start=None, t_stop=None):
-    """
-    Calculate the rectified area under the curve (RAUC) for a
-    `neo.AnalogSignal`.
-
-    The signal is optionally divided into bins with duration `bin_duration`,
-    and the rectified signal (absolute value) is integrated within each bin to
-    find the area under the curve. The mean or median of the signal or an
-    arbitrary baseline may optionally be subtracted before rectification.
-
-    Parameters
-    ----------
-    signal : neo.AnalogSignal
-        The signal to integrate. If `signal` contains more than one channel,
-        each is integrated separately.
-    baseline : pq.Quantity or {'mean', 'median'}, optional
-        A factor to subtract from the signal before rectification.
-        If 'mean', the mean value of the entire `signal` is subtracted on a
-        channel-by-channel basis.
-        If 'median', the median value of the entire `signal` is subtracted on
-        a channel-by-channel basis.
-        Default: None.
-    bin_duration : pq.Quantity, optional
-        The length of time that each integration should span.
-        If None, there will be only one bin spanning the entire signal
-        duration.
-        If `bin_duration` does not divide evenly into the signal duration, the
-        end of the signal is padded with zeros to accommodate the final,
-        overextending bin.
-        Default: None.
-    t_start : pq.Quantity, optional
-        Time to start the algorithm.
-        If None, starts at the beginning of `signal`.
-        Default: None.
-    t_stop : pq.Quantity, optional
-        Time to end the algorithm.
-        If None, ends at the last time of `signal`.
-        The signal is cropped using `signal.time_slice(t_start, t_stop)` after
-        baseline removal. Useful if you want the RAUC for a short section of
-        the signal but want the mean or median calculation (`baseline`='mean'
-        or `baseline`='median') to use the entire signal for better baseline
-        estimation.
-        Default: None.
-
-    Returns
-    -------
-    pq.Quantity or neo.AnalogSignal
-        If the number of bins is 1, the returned object is a scalar or
-        vector `pq.Quantity` containing a single RAUC value for each channel.
-        Otherwise, the returned object is a `neo.AnalogSignal` containing the
-        RAUC(s) for each bin stored as a sample, with times corresponding to
-        the center of each bin. The output signal will have the same number
-        of channels as the input signal.
-
-    Raises
-    ------
-    ValueError
-        If `signal` is not `neo.AnalogSignal`.
-
-        If `bin_duration` is neither None nor a `pq.Quantity`.
-
-        If `baseline` is not one of None, 'mean', 'median', or a
-        `pq.Quantity`.
-
-    See Also
-    --------
-    neo.AnalogSignal.time_slice : how `t_start` and `t_stop` are used
-
-    """
-
-    if not isinstance(signal, neo.AnalogSignal):
-        raise ValueError('Input signal is not a neo.AnalogSignal!')
-
-    if baseline is None:
-        pass
-    elif baseline == 'mean':
-        # subtract mean from each channel
-        signal = signal - signal.mean(axis=0)
-    elif baseline == 'median':
-        # subtract median from each channel
-        signal = signal - np.median(signal.as_quantity(), axis=0)
-    elif isinstance(baseline, pq.Quantity):
-        # subtract arbitrary baseline
-        signal = signal - baseline
-    else:
-        raise ValueError("baseline must be either None, 'mean', 'median', or "
-                         "a Quantity. Got {}".format(baseline))
-
-    # slice the signal after subtracting baseline
-    signal = signal.time_slice(t_start, t_stop)
-
-    if bin_duration is not None:
-        # from bin duration, determine samples per bin and number of bins
-        if isinstance(bin_duration, pq.Quantity):
-            samples_per_bin = int(
-                np.round(
-                    bin_duration.rescale('s') /
-                    signal.sampling_period.rescale('s')))
-            n_bins = int(np.ceil(signal.shape[0] / samples_per_bin))
-        else:
-            raise ValueError("bin_duration must be a Quantity. Got {}".format(
-                bin_duration))
-    else:
-        # all samples in one bin
-        samples_per_bin = signal.shape[0]
-        n_bins = 1
-
-    # store the actual bin duration
-    bin_duration = samples_per_bin * signal.sampling_period
-
-    # reshape into equal size bins, padding the end with zeros if necessary
-    n_channels = signal.shape[1]
-    sig_binned = signal.as_quantity().copy()
-    sig_binned.resize(n_bins * samples_per_bin, n_channels, refcheck=False)
-    sig_binned = sig_binned.reshape(n_bins, samples_per_bin, n_channels)
-
-    # rectify and integrate over each bin
-    rauc = np.trapz(np.abs(sig_binned), dx=signal.sampling_period, axis=1)
-
-    if n_bins == 1:
-        # return a single value for each channel
-        return rauc.squeeze()
-
-    else:
-        # return an AnalogSignal with times corresponding to center of each bin
-        t_start = signal.t_start.rescale(bin_duration.units) + bin_duration / 2
-        rauc_sig = neo.AnalogSignal(rauc, t_start=t_start,
-                                    sampling_period=bin_duration)
-        return rauc_sig
-
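The bin arithmetic above (ceil division plus zero padding) guarantees that a
signal whose length is not a multiple of the bin size still ends in a
complete, partially zero-filled bin. A NumPy-only sketch of the same reshape
and integration:

    import numpy as np

    samples, samples_per_bin = 10, 4
    n_bins = int(np.ceil(samples / samples_per_bin))  # 3 bins

    rng = np.random.default_rng(0)
    x = rng.standard_normal((samples, 2))  # two channels

    padded = np.zeros((n_bins * samples_per_bin, 2))
    padded[:samples] = x  # zero-pad the tail
    binned = padded.reshape(n_bins, samples_per_bin, 2)

    # Rectified area per bin and channel (dx would be the sampling period)
    rauc = np.trapz(np.abs(binned), dx=1.0, axis=1)
    print(rauc.shape)  # (3, 2)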
-
-def derivative(signal):
-    """
-    Calculate the derivative of a `neo.AnalogSignal`.
-
-    Parameters
-    ----------
-    signal : neo.AnalogSignal
-        The signal to differentiate. If `signal` contains more than one
-        channel, each is differentiated separately.
-
-    Returns
-    -------
-    derivative_sig: neo.AnalogSignal
-        The returned object is a `neo.AnalogSignal` containing the differences
-        between each successive sample value of the input signal divided by
-        the sampling period. Times are centered between the successive samples
-        of the input. The output signal will have the same number of channels
-        as the input signal.
-
-    Raises
-    ------
-    TypeError
-        If `signal` is not a `neo.AnalogSignal`.
-
-    """
-
-    if not isinstance(signal, neo.AnalogSignal):
-        raise TypeError('Input signal is not a neo.AnalogSignal!')
-
-    derivative_sig = neo.AnalogSignal(
-        np.diff(signal.as_quantity(), axis=0) / signal.sampling_period,
-        t_start=signal.t_start + signal.sampling_period / 2,
-        sampling_period=signal.sampling_period)
-
-    return derivative_sig
+/annex/objects/MD5-s36918--87c70ad694ded981c47b37d27ab56f14
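The half-sample shift of `t_start` in the removed `derivative` centers each
difference between the two samples it was computed from. A small check with
plain NumPy and quantities:

    import numpy as np
    import quantities as pq
    import neo

    sig = neo.AnalogSignal(np.array([[0., 1., 4., 9.]]).T * pq.mV,
                           sampling_rate=1 * pq.Hz, t_start=0 * pq.s)

    diff = np.diff(sig.as_quantity(), axis=0) / sig.sampling_period
    centers = sig.times[:-1] + sig.sampling_period / 2

    print(centers)       # [0.5 1.5 2.5] s
    print(diff.ravel())  # [1. 3. 5.] mV/s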

File diff suppressed because it is too large
+ 1 - 2349
code/elephant/elephant/spade.py


+ 1 - 11
code/elephant/elephant/spade_src/LICENSE

@@ -1,11 +1 @@
-For any version published on or after October 23, 2014:
-
-(MIT license, or more precisely Expat License; to be found in the file mit-license.txt in the directory <prgname>/doc in the source package of the program, see also opensource.org and wikipedia.org)
-
-© 1996-2014 Christian Borgelt
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+/annex/objects/MD5-s1311--c1744a342b19ebc0cd7ddde10b35ba60

+ 1 - 0
code/elephant/elephant/spade_src/__init__.py

@@ -0,0 +1 @@
+/annex/objects/MD5-s0--d41d8cd98f00b204e9800998ecf8427e

+ 0 - 0
code/elephant/elephant/spade_src/fast_fca.py


Some files were not shown because too many files changed in this diff