Ver Fonte

added hallucinations

asobolev há 6 meses
pai
commit
af00b4a69d

Diff do ficheiro suprimida por ser muito extensa
+ 77 - 14
analysis/AEPs/AEPs - single.ipynb


+ 0 - 42
analysis/Behavior/Clustering.ipynb

@@ -144,48 +144,6 @@
     "### Build feature matrix"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "id": "d2f4e736",
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "10"
-      ]
-     },
-     "execution_count": 11,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "u_labels.index('5-5')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "id": "7bc7b3fd",
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "(array([ 7,  8,  9, 10]),)"
-      ]
-     },
-     "execution_count": 9,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "np.where(s_labels == 2)[0], "
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": 8,

Diff do ficheiro suprimida por ser muito extensa
+ 635 - 0
analysis/Behavior/MoSeq tSNE - UMAP - PCA.ipynb


Diff do ficheiro suprimida por ser muito extensa
+ 227 - 0
analysis/Behavior/Sustained activity.ipynb


Diff do ficheiro suprimida por ser muito extensa
+ 924 - 0
analysis/Hallucinations/Development.ipynb


Diff do ficheiro suprimida por ser muito extensa
+ 406 - 0
analysis/Hallucinations/Response profiles.ipynb


Diff do ficheiro suprimida por ser muito extensa
+ 832 - 0
analysis/Hallucinations/Response readout.ipynb


Diff do ficheiro suprimida por ser muito extensa
+ 72 - 37
analysis/PSTH/TGT - BGR and Target Onset - Offset.ipynb


Diff do ficheiro suprimida por ser muito extensa
+ 261 - 96
analysis/PSTH/shuffle.ipynb


Diff do ficheiro suprimida por ser muito extensa
+ 228 - 0
analysis/PSTH/silence -> trial onset.ipynb


Diff do ficheiro suprimida por ser muito extensa
+ 0 - 222
analysis/PSTH/silence.ipynb


+ 3 - 3
analysis/Target/unit before onset.ipynb

@@ -259,7 +259,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "3955b549",
+   "id": "b9b7ff86",
    "metadata": {},
    "source": [
     "## Increase FR in TGT SUCCESS, not in TGT MISS, and drop FR after success"
@@ -268,7 +268,7 @@
   {
    "cell_type": "code",
    "execution_count": 42,
-   "id": "ababef37",
+   "id": "306f0f00",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -278,7 +278,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "27cabb9e",
+   "id": "e68da9d5",
    "metadata": {},
    "outputs": [],
    "source": []

+ 62 - 1
analysis/target.py

@@ -62,6 +62,67 @@ def build_silence_matrix(tl):
     return np.column_stack([idxs_silence_start, idxs_silence_end])
 
 
def get_idxs_of_event_periods(tl, event_type):
    # Return index pairs into the timeline delimiting contiguous periods of
    # the given event type.
    #
    # event_type: -1, 0, 1, 2 (noise, silence, background, target); compared
    #             against tl column 6
    # returns: (n_periods, 2) int32 array of [start, end] indices into tl,
    #          both inclusive; empty (0, 2) array if the event never occurs
    idxs_events = np.where(tl[:, 6] == event_type)[0]
    if len(idxs_events) == 0:
        return np.zeros([0, 2], dtype=np.int32)

    # positions where consecutive event indices are not adjacent - period breaks
    gaps = np.where(np.diff(idxs_events) > 1)[0]

    # each period runs from the element just after a break up to the next break.
    # FIX: the last period previously started AT the break index (missing +1),
    # and an event with no breaks at all raised IndexError.
    starts  = np.concatenate([[0], gaps + 1])
    ends    = np.concatenate([gaps, [len(idxs_events) - 1]])
    periods = np.column_stack([starts, ends]).astype(np.int32)

    # convert from positions within idxs_events to timeline indices
    return np.column_stack([idxs_events[periods[:, 0]], idxs_events[periods[:, 1]]])
+
+
def build_silence_and_noise_events(tl, offset, latency, drift):
    # Build hallucination pulse times falling into silence and noise periods.
    #
    # tl:      timeline array; column 0 is time (s), column 6 the event state
    #          (0 = silence, -1 = noise)
    # offset:  constant shift (s) applied to every pulse
    # latency: time (s) trimmed from the end of the session
    # drift:   total linear drift (s) accumulated across the pulse train
    # returns: (pulses_silence, pulses_noise) arrays of pulse times
    duration = tl[-1][0]

    # regular 4 Hz pulse train over the session, shifted by offset,
    # with a linearly accumulating drift
    pulse_times = np.linspace(0, int(duration - latency), int(duration - latency)*4 + 1) + offset
    pulse_times += np.arange(len(pulse_times)) * drift/len(pulse_times)

    # classify each pulse by the timeline state at (or just after) its time
    pulses_silence = []
    pulses_noise   = []
    tl_idx = 0  # timeline index of the current pulse (pulse_times is sorted)
    for t_pulse in pulse_times:
        # FIX: guard against walking past the end of the timeline, which
        # happened (IndexError) whenever offset + drift exceeded latency
        while tl_idx < len(tl) and tl[tl_idx][0] < t_pulse:
            tl_idx += 1
        if tl_idx == len(tl):
            break  # this and all later pulses fall past the recording end

        if tl[tl_idx][6] == 0:
            pulses_silence.append(t_pulse)
        elif tl[tl_idx][6] == -1:
            pulses_noise.append(t_pulse)

    return np.array(pulses_silence), np.array(pulses_noise)
+
+
def get_spike_times_at(tl, s_times, periods, mode='sequence'):
    # Collect spike times that fall inside the given timeline periods.
    #
    # 'sequence' - periods follow each other in a sequence
    # 'overlay'  - all periods aligned to time zero
    #
    # returns a list with one array of aligned spike times per period
    groups  = []
    elapsed = 0  # total duration of the periods processed so far
    for idx_l, idx_r in periods:
        t_start = tl[idx_l][0]
        t_end   = tl[idx_r][0]

        in_period = (s_times > t_start) & (s_times < t_end)
        aligned   = s_times[in_period] - t_start  # align to time 0
        if mode == 'sequence':
            aligned = aligned + elapsed  # stack after the preceding periods
        groups.append(aligned)

        elapsed += t_end - t_start
    return groups
+
+
 def get_spike_counts(spk_times, pulse_times, hw=0.25, bin_count=51):
     collected = []
     for t_pulse in pulse_times:
@@ -71,7 +132,7 @@ def get_spike_counts(spk_times, pulse_times, hw=0.25, bin_count=51):
 
     bins = np.linspace(-hw, hw, bin_count)
     counts, _ = np.histogram(collected, bins=bins)
-    counts = (counts / len(pulse_times))# * 1/((2. * hw)/float(bin_count - 1))
+    counts = counts / len(pulse_times) # * 1/((2. * hw)/float(bin_count - 1))
     counts = counts / (bins[1] - bins[0])  # divide by bin size to get firing rate
     
     return bins, counts

+ 147 - 4
session/overview.ipynb

@@ -2,7 +2,33 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 7,
+   "id": "4001a375",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import sys, os\n",
+    "sys.path.append(os.path.join(os.getcwd(), '..'))\n",
+    "sys.path.append(os.path.join(os.getcwd(), '..', '..'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "570920e2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%matplotlib inline\n",
+    "\n",
+    "from session.sessions import selected_009266\n",
+    "from analysis.imports import *\n",
+    "#from analysis.loading import load_session_data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
    "id": "51eeaa1d",
    "metadata": {},
    "outputs": [],
@@ -28,8 +54,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
-   "id": "0b384b99",
+   "execution_count": 13,
+   "id": "26ba4176",
    "metadata": {},
    "outputs": [
     {
@@ -89,6 +115,121 @@
     "    return [has_h5_ephys(path), has_dat(path), has_clu(path), ch_num(path)]"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "b3b05781",
+   "metadata": {},
+   "source": [
+    "## Unit count"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "id": "c12f7fc1",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['009266_hippoSIT_2023-04-13_08-57-46',\n",
+       " '009266_hippoSIT_2023-04-14_09-17-34',\n",
+       " '009266_hippoSIT_2023-04-17_09-06-10',\n",
+       " '009266_hippoSIT_2023-04-17_17-04-17',\n",
+       " '009266_hippoSIT_2023-04-18_10-10-37',\n",
+       " '009266_hippoSIT_2023-04-18_17-03-10',\n",
+       " '009266_hippoSIT_2023-04-19_10-33-51',\n",
+       " '009266_hippoSIT_2023-04-20_08-57-39',\n",
+       " '009266_hippoSIT_2023-04-20_15-24-14',\n",
+       " '009266_hippoSIT_2023-04-21_08-43-00',\n",
+       " '009266_hippoSIT_2023-04-21_13-12-31',\n",
+       " '009266_hippoSIT_2023-04-24_10-08-11',\n",
+       " '009266_hippoSIT_2023-04-24_16-56-55',\n",
+       " '009266_hippoSIT_2023-04-26_08-20-17',\n",
+       " '009266_hippoSIT_2023-05-02_12-22-14',\n",
+       " '009266_hippoSIT_2023-05-04_09-11-06',\n",
+       " '009266_hippoSIT_2023-05-04_19-47-15',\n",
+       " '009266_hippoSIT_2023-05-22_09-27-22',\n",
+       " '009266_hippoSIT_2023-05-23_09-18-05',\n",
+       " '009266_hippoSIT_2023-05-25_15-55-57',\n",
+       " '009266_hippoSIT_2023-06-14_08-21-23',\n",
+       " '009266_hippoSIT_2023-06-19_08-58-35']"
+      ]
+     },
+     "execution_count": 14,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "sessions = [s for s in selected_009266.keys()]\n",
+    "sessions.sort()\n",
+    "sessions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "id": "351ef94c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "unit_mx = np.zeros([len(sessions), 6])  # 6 shanks total\n",
+    "for i, session in enumerate(sessions):\n",
+    "    animal      = session.split('_')[0]\n",
+    "    h5_file     = os.path.join(source, animal, session, session + '.h5')\n",
+    "    \n",
+    "    with h5py.File(h5_file, 'r') as f:\n",
+    "        unit_names = [x for x in f['units']]\n",
+    "        \n",
+    "    shanks = np.unique([int(x[0]) for x in unit_names])\n",
+    "    shanks.sort()\n",
+    "    for shank in shanks:\n",
+    "        unit_mx[i, shank - 1] = len([x for x in unit_names if int(x[0]) == shank])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "id": "c8ecf2cb",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([[ 6,  2,  6,  3,  0,  0],\n",
+       "       [11,  1,  6,  2,  1,  0],\n",
+       "       [ 8,  1,  6,  5,  1,  0],\n",
+       "       [14,  2,  7,  3,  1,  0],\n",
+       "       [12, 13, 14,  6,  4,  0],\n",
+       "       [17, 10, 13,  5,  3,  0],\n",
+       "       [19,  5,  6,  8,  3,  0],\n",
+       "       [ 0,  4, 15,  8,  5,  3],\n",
+       "       [ 0,  1,  6,  6,  8,  3],\n",
+       "       [ 0,  4, 17,  8,  6,  5],\n",
+       "       [ 1,  4, 17,  8,  0,  1],\n",
+       "       [22, 21,  0,  9,  1,  3],\n",
+       "       [18,  1, 22,  4,  0,  7],\n",
+       "       [ 2, 24,  9,  6,  5,  7],\n",
+       "       [ 0, 17,  0,  9,  5,  8],\n",
+       "       [25, 29, 30, 14,  7, 19],\n",
+       "       [30, 14,  7,  7,  1,  3],\n",
+       "       [31, 34, 10,  2,  2, 12],\n",
+       "       [22, 43,  4,  0,  1,  6],\n",
+       "       [14, 41, 55,  2,  1,  7],\n",
+       "       [11,  0, 35,  1,  1,  2],\n",
+       "       [32,  0, 17,  1,  0,  0]], dtype=int16)"
+      ]
+     },
+     "execution_count": 19,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "unit_mx.astype(np.int16)"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "531ad003",
@@ -401,7 +542,9 @@
    "cell_type": "code",
    "execution_count": 20,
    "id": "d466c590",
-   "metadata": {},
+   "metadata": {
+    "scrolled": true
+   },
    "outputs": [
     {
      "data": {