Example #1
def populatemytables(paralel=True, cores=9):
    # map water restriction numbers to subject ids
    IDs = {
        k: v
        for k, v in zip(*lab.WaterRestriction().fetch(
            'water_restriction_number', 'subject_id'))
    }
    if paralel:
        schema = dj.schema(get_schema_name('foraging_analysis'), locals())
        # schema.jobs.delete()

        if use_ray:  # module-level flag selecting the ray backend
            ray.init(num_cpus=cores)
            arguments = {
                'display_progress': False,
                'reserve_jobs': True,
                'order': 'random'
            }

            for runround in [1]:
                print('round {} of populate'.format(runround))
                result_ids = [
                    populatemytables_core_paralel.remote(arguments, runround)
                    for coreidx in range(cores)
                ]
                ray.get(result_ids)
            ray.shutdown()
        else:  # use multiprocessing
            import multiprocessing

            arguments = {
                'display_progress': False,
                'reserve_jobs': True,  # let concurrent workers skip keys already claimed by another worker
                'order': 'random'
            }

            pool = multiprocessing.Pool(cores)
            for runround in [1]:
                print('round {} of populate'.format(runround))

                result_ids = [
                    pool.apply_async(populatemytables_core_paralel,
                                     args=(arguments, runround))
                    for coreidx in range(cores)
                ]

                for result_id in result_ids:
                    result_id.get()
            pool.close()
            pool.join()

        # final sequential pass to catch anything the parallel rounds missed
        arguments = {'display_progress': True, 'reserve_jobs': False}
        populatemytables_core(arguments, runround)

    else:
        for runround in [1]:
            arguments = {
                'display_progress': True,
                'reserve_jobs': False,
                'order': 'random'
            }
            populatemytables_core(arguments, runround)
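
# For context, a minimal sketch of what populatemytables_core (defined
# elsewhere in the pipeline) plausibly does each round: call populate()
# on the schema's computed tables in dependency order, forwarding the
# populate() keyword arguments. The table choice is illustrative only,
# borrowed from the foraging_analysis schema (see TrialStats below).
def populatemytables_core_sketch(arguments, runround):
    if runround == 1:
        TrialStats().populate(**arguments)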
Example #2
def erd(*args):
    report = dj.create_virtual_module('report', get_schema_name('report'))
    mods = (ephys, lab, experiment, tracking, psth, ccf, histology, report,
            publication)
    os.makedirs('images', exist_ok=True)  # ensure the output directory exists
    for mod in mods:
        # short module name, e.g. 'pipeline.lab' -> 'lab' (also handles the
        # virtual 'report' module, whose __name__ has no package prefix)
        modname = mod.__name__.split('.')[-1]
        fname = os.path.join('images', '{}.png'.format(modname))
        print('saving', fname)
        dj.ERD(mod, context={modname: mod}).save(fname)
Example #3
def dropdbs():
    print('dropping databases')
    # downstream schemas are dropped first so foreign-key dependencies
    # are released before their upstream schemas
    for d in [
            'ingest_histology', 'ingest_ephys', 'ingest_tracking',
            'ingest_behavior', 'publication', 'psth', 'tracking', 'ephys',
            'experiment', 'lab', 'ccf'
    ]:
        dname = get_schema_name(d)
        print('..  {} ({})'.format(d, dname))
        try:
            schema = dj.schema(dname)
            schema.drop(force=True)
        except Exception:
            # schema may not exist; skip and continue with the rest
            pass
Example #4
import csv
import re
import json
import math
import logging

import numpy as np
import scipy.io as scio
import datajoint as dj
from datetime import datetime

from pipeline import lab, ephys, experiment, ccf, histology, report
from pipeline import get_schema_name, dict_to_hash

from pipeline.ingest import behavior as behavior_ingest

schema = dj.schema(get_schema_name('ingest_histology'))

log = logging.getLogger(__name__)


def get_histology_paths():
    """
    Retrieve histology data paths from dj.config.
    The configuration should appear in dj.config in the format:

      dj.config = {
        ...,
        'custom': {
          'histology_data_paths': ['/path/string', '/path2/string']
        }
        ...
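
# A sketch of the lookup the docstring implies, assuming an empty list is
# an acceptable default when the 'custom' section is absent (the _sketch
# name marks this as illustrative, not the pipeline's implementation):
def get_histology_paths_sketch():
    custom = dj.config['custom'] if 'custom' in dj.config else {}
    return custom.get('histology_data_paths', [])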
Example #5
import io
import os
import itertools

import seaborn as sns
import datajoint as dj
from PIL import Image

from pipeline import experiment, ephys, psth, tracking, lab, histology, ccf, foraging_analysis
from pipeline.plot import behavior_plot, unit_characteristic_plot, unit_psth, histology_plot, PhotostimError, foraging_plot
from pipeline import get_schema_name
from pipeline.plot.util import _plot_with_sem, _jointplot_w_hue
from pipeline.util import _get_trial_event_times

import warnings
warnings.filterwarnings('ignore')

schema = dj.schema(get_schema_name('report'))

os.environ['DJ_SUPPORT_FILEPATH_MANAGEMENT'] = "TRUE"

DEFAULT_REPORT_STORE = {
    "protocol": "s3",
    "endpoint": "s3.amazonaws.com",
    "bucket": "map-report",
    "location": "report/v2",
    "stage": "./data/report_stage",
    "access_key": "",
    "secret_key": ""
}

if 'stores' not in dj.config:
    dj.config['stores'] = {}
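
# A sketch of the likely next step: register the report store unless one
# is already configured ('report_store' as the key name is an assumption
# of this sketch, not confirmed by the excerpt):
if 'report_store' not in dj.config['stores']:
    dj.config['stores']['report_store'] = DEFAULT_REPORT_STORE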
Example #6
import datajoint as dj
import logging

from pipeline import lab, experiment
from pipeline import get_schema_name
from pipeline.ingest import get_loader

schema = dj.schema(get_schema_name('ingestion'))

log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
"""
For debugging purposes (to be removed)
from pipeline.ingest import session_ingest
from pipeline.ingest.loaders.wenxi import WenxiLoader
self = WenxiLoader('Z:/all_staff/Wenxi/PrV_Wall_Recordings/', dj.config)
key= {'subject_id': 'Rec8', 'session': 1}
subject_name = 'Rec8'
"""

# ============== SESSION INGESTION ==================


@schema
class InsertedSession(dj.Imported):
    definition = """  # Ingestion-specific information about a session
    -> experiment.Session
    ---
    loader_method: varchar(16)  # name of loader method used
    sess_data_dir: varchar(255) # directory path for this session, relative to root data directory
    """
Example #7
import datajoint as dj

from pipeline import get_schema_name

schema = dj.schema(get_schema_name('fixes'))


@schema
class FixHistory(dj.Manual):
    """
    Any fixes requiring accompanying tables, those tables must be children of this FixHistory
    """
    definition = """
    fix_name:   varchar(255)
    fix_timestamp:   timestamp
    """
Example #8
import math

import numpy as np
import pandas as pd
import datajoint as dj

from pipeline import experiment, get_schema_name

dj.config["enable_python_native_blobs"] = True

schema = dj.schema(get_schema_name('foraging_analysis'), locals())
#%%
bootstrapnum = 100
minimum_trial_per_block = 30


#%%
@schema
class TrialStats(dj.Computed):
    definition = """
    -> experiment.BehaviorTrial
    ---
    reaction_time = null : decimal(8,4)  # reaction time in seconds (first lick relative to go cue); -1 for ignored trials
    double_dipping = null : tinyint      # whether this trial was double-dipped
    """
    # Foraging sessions only
    key_source = experiment.BehaviorTrial & 'task LIKE "foraging%"'

    def make(self, key):
        trial_stats = dict()

        # -- Reaction time --
        gocue_time = (experiment.TrialEvent & key
                      & 'trial_event_type = "go"').fetch1('trial_event_time')
        q_all_licks_after_go_cue = (