def main():
    process_pool_context = multiprocessing.get_context('spawn')
    pool = multiprocessing.pool.Pool(
        processes=2,
        context=process_pool_context,
    )

    multiprocessing_manager = multiprocessing.Manager()
    multiprocessing_queue = multiprocessing_manager.Queue(
        maxsize=test_queue_size,
    )

    # time insertions into the manager-backed (proxied) queue
    start = time.time()
    for i in range(test_queue_size):
        multiprocessing_queue.put(b'1')
    end = time.time()

    print('queue INSERTION:')
    print(end - start)

    pool.apply(func=consume_queue, args=(multiprocessing_queue, ), kwds={})

    # for comparison, time insertions into an in-process queue.Queue
    regular_queue = queue.Queue()
    start = time.time()
    for i in range(test_queue_size):
        regular_queue.put(b'1')
    end = time.time()

    print('queue INSERTION:')
    print(end - start)
    consume_queue(regular_queue)
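This snippet relies on module-level pieces that the excerpt does not show (test_queue_size, consume_queue and the imports). A minimal sketch of what they might look like, purely an assumption so the example can be run end to end:

# Hypothetical scaffolding -- not part of the original example.
import multiprocessing
import multiprocessing.pool
import queue
import time

test_queue_size = 10_000  # assumed benchmark size


def consume_queue(q):
    # drain the queue and report how long consumption took
    start = time.time()
    for _ in range(test_queue_size):
        q.get()
    print('queue CONSUMPTION:')
    print(time.time() - start)


if __name__ == '__main__':
    main()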
Example #2
    def hyperpol_current(
            self, model, delay, dur, section_stim, loc_stim, section_rec,
            loc_rec
    ):  # find a hyperpolarizing current to stop spontaneous firing

        upper_bound = -0.02
        lower_bound = -0.01

        h_amps = numpy.arange(lower_bound, upper_bound, -0.002)

        hyperpol_amp = None  # stays None if no tested amplitude silences the cell

        for h_amp in h_amps:
            pool = multiprocessing.Pool(
                1, maxtasksperchild=1
            )  # I use multiprocessing to keep every NEURON related task in independent processes

            trace = pool.apply(self.run_cclamp_on_soma,
                               args=(model, 0, h_amp, delay, dur, section_stim,
                                     loc_stim, section_rec, loc_rec))
            pool.terminate()
            pool.join()
            del pool
            print('h_amp: ' + str(h_amp))
            spikecount = self.spikecount(delay, dur, trace)
            if spikecount == 0:
                hyperpol_amp = h_amp
                break

        return hyperpol_amp
Example #3
    def binsearch(self, model, stim_range, delay, dur, section_stim, loc_stim, section_rec, loc_rec):
        c_minmax = stim_range
        c_step_start = 0.01
        c_step_stop = 0.002

        found = False
        spikecounts = []
        amplitudes = []

        while c_step_start >= c_step_stop and not found:

            c_stim = numpy.arange(c_minmax[0], c_minmax[1], c_step_start)

            first = 0
            last = numpy.size(c_stim, axis=0)-1

            while first <= last and not found:

                midpoint = (first + last)//2
                amplitude = c_stim[midpoint]

                result = []

                pool = multiprocessing.Pool(1, maxtasksperchild=1)    # I use multiprocessing to keep every NEURON related task in independent processes

                traces = pool.apply(self.run_cclamp_on_soma, args=(model, amplitude, delay, dur, section_stim, loc_stim, section_rec, loc_rec))
                pool.terminate()
                pool.join()
                del pool

                spikecount = self.spikecount(delay, dur, traces)

                amplitudes.append(amplitude)
                spikecounts.append(spikecount)

                #if spikecount >= 10 and spikecount <=20:
                if spikecount == 15:
                    found = True
                else:
                    #if spikecount > 20:
                    if spikecount > 15:
                        last = midpoint-1
                    #elif spikecount < 10:
                    elif spikecount < 15:
                        first = midpoint+1
            c_step_start = c_step_start / 2

        if not found:
            amp_index = min((p for p in range(len(spikecounts)) if spikecounts[p] != 0), key=lambda i: abs(spikecounts[i]-15.0)) # we choose the one that is nearest to 15, but not 0
            # print list(p for p in range(len(spikecounts)) if spikecounts[p] != 0) # this gives the indices where spikecount is not 0, then i takes up these values
            #print amp_index
            amplitude = amplitudes[amp_index]
            spikecount = spikecounts[amp_index]


        binsearch_result = [found, amplitude, spikecount]
        #print binsearch_result

        return binsearch_result
Example #4
def run_emodel_morph_isolated(input_args):
    """Run e-model morphology combination in isolated environment.

    Args:
        input_args: tuple
        - uid: unique identifier of the e-model morphology combination
        - emodel: e-model name
        - emodel_dir: directory containing e-model files
        - emodel_params: dict that maps e-model parameters to their values
        - morph_path: path to morphology

    Returns:
        Dict with keys 'exception', 'extra_values', 'scores', 'uid'.
    """

    uid, emodel, emodel_dir, emodel_params, morph_path = input_args

    return_dict = {}
    return_dict['uid'] = uid
    return_dict['exception'] = None

    pool = NestedPool(1, maxtasksperchild=1)

    try:
        return_dict['scores'], return_dict['extra_values'] = pool.apply(
            run_emodel_morph, (emodel, emodel_dir, emodel_params, morph_path))
    except Exception:
        return_dict['scores'] = None
        return_dict['extra_values'] = None
        return_dict['exception'] = "".join(
            traceback.format_exception(*sys.exc_info()))

    pool.terminate()
    pool.join()
    del pool

    return return_dict
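run_emodel_morph_isolated is designed to be called once per e-model/morphology combination, typically from a higher-level map over combinations. The following call is a usage sketch only; the argument values are hypothetical placeholders, not taken from the project:

# Hypothetical usage -- the values below are placeholders, not real project data.
input_args = ('emodel1__morph1',           # uid
              'emodel1',                   # e-model name
              '/path/to/emodel_dir',       # directory containing e-model files
              {'gkbar_hh': 0.01},          # e-model parameters (example values)
              '/path/to/morphology.asc')   # path to morphology

result = run_emodel_morph_isolated(input_args)
if result['exception'] is None:
    print(result['scores'], result['extra_values'])
else:
    print('combination failed:', result['exception'])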
Example #5
    def generate_prediction(self, model, verbose=False):
        """Implementation of sciunit.Test.generate_prediction."""

        efel.reset()

        if self.base_directory:
            self.path_results = self.base_directory + 'results/' + 'backpropagating_AP/' + model.name + '/'
        else:
            self.path_results = model.base_directory + 'results/' + 'backpropagating_AP/'

        try:
            if not os.path.exists(self.path_results):
                os.makedirs(self.path_results)
        except OSError as e:
            if e.errno != 17:
                raise
            pass


        global model_name_bAP
        model_name_bAP = model.name

        distances = self.config['recording']['distances']
        tolerance = self.config['recording']['tolerance']

        dend_locations, actual_distances = model.find_trunk_locations_multiproc(distances, tolerance, self.trunk_origin)
        #print dend_locations, actual_distances

        print('Dendritic locations to be tested (with their actual distances):', actual_distances)

        traces={}
        delay = self.config['stimulus']['delay']
        duration = self.config['stimulus']['duration']
        #amplitude = self.config['stimulus']['amplitude']

        prediction = collections.OrderedDict()

        #plt.close('all') #needed to avoid overlapping of saved images when the test is run on multiple models in a for loop
        plt.close('all') #needed to avoid overlapping of saved images when the test is run on multiple models

        amplitude, message_to_logFile = self.find_current_amp(model, delay, duration, "soma", 0.5, "soma", 0.5)

        pool = multiprocessing.Pool(1, maxtasksperchild = 1)
        traces = pool.apply(self.cclamp, args = (model, amplitude, delay, duration, "soma", 0.5, dend_locations))

        filepath = self.path_results + self.test_log_filename
        self.logFile = open(filepath, 'w') # if it is opened before the multiprocessing call, multiprocessing won't work under python3

        self.logFile.write('Dendritic locations to be tested (with their actual distances):\n'+ str(actual_distances)+'\n')
        self.logFile.write("---------------------------------------------------------------------------------------------------\n")

        self.logFile.write(message_to_logFile)


        #plt.close('all') #needed to avoid overlapping of saved images when the test is run on multiple models

        self.plot_traces(model, traces, dend_locations, actual_distances)

        traces_results = self.extract_somatic_spiking_features(traces, delay, duration)


        features = self.extract_amplitudes(traces, traces_results, actual_distances)

        features_json = collections.OrderedDict()
        for key in features:
            features_json[key] = collections.OrderedDict()
            for ke in features[key]:
                features_json[key][str(ke)] = collections.OrderedDict()
                for k, value in features[key][ke].items():
                    features_json[key][str(ke)][k] = str(value)


        # generating prediction
        for key in features:
            AP1_amps = numpy.array([])
            APlast_amps = numpy.array([])

            for k in features[key]:
                AP1_amps = numpy.append(AP1_amps, features[key][k]['AP1_amp'] )
            prediction['model_AP1_amp_at_'+str(key)+'um'] = {}
            prediction['model_AP1_amp_at_'+str(key)+'um']['mean'] = float(numpy.mean(AP1_amps))*mV
            prediction['model_AP1_amp_at_'+str(key)+'um']['std'] = float(numpy.std(AP1_amps))*mV

        for key in features:
            AP1_amps = numpy.array([])
            APlast_amps = numpy.array([])
            for k in features[key]:
                APlast_amps = numpy.append(APlast_amps, features[key][k]['APlast_amp'] )
            prediction['model_APlast_amp_at_'+str(key)+'um'] = {}
            prediction['model_APlast_amp_at_'+str(key)+'um']['mean'] = float(numpy.mean(APlast_amps))*mV
            prediction['model_APlast_amp_at_'+str(key)+'um']['std'] = float(numpy.std(APlast_amps))*mV

        prediction_json = collections.OrderedDict()
        for key in prediction:
            prediction_json[key] = collections.OrderedDict()
            for k, value in prediction[key].items():
                prediction_json[key][k]=str(value)


        file_name_json = self.path_results + 'bAP_model_features_means.json'
        json.dump(prediction_json, open(file_name_json, "w"), indent=4)
        file_name_features_json = self.path_results + 'bAP_model_features.json'
        json.dump(features_json, open(file_name_features_json, "w"), indent=4)

        if self.save_all:
            file_name_pickle = self.path_results + 'bAP_model_features.p'

            pickle.dump(features, gzip.GzipFile(file_name_pickle, "wb"))

            file_name_pickle = self.path_results + 'bAP_model_features_means.p'

            pickle.dump(prediction, gzip.GzipFile(file_name_pickle, "wb"))

        self.plot_features(model, features, actual_distances)

        efel.reset()

        return prediction
Example #6
    def func_isolated(*args, **kwargs):
        with NestedPool(1, maxtasksperchild=1) as pool:
            return pool.apply(func, args, kwargs)
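NestedPool is not defined in this excerpt; it is presumably a Pool whose workers are non-daemonic so that they may start child processes of their own, along the lines of the NoDaemonProcess/MyPool pattern shown in Example #10. A minimal sketch of such a class for Python 3, offered as an assumption rather than the project's actual definition:

# Hypothetical sketch of a non-daemonic pool -- an assumption, not the original NestedPool.
import multiprocessing
import multiprocessing.pool


class NoDaemonProcess(multiprocessing.Process):
    # always report daemon=False so the worker may spawn children of its own
    @property
    def daemon(self):
        return False

    @daemon.setter
    def daemon(self, value):
        pass


class NoDaemonContext(type(multiprocessing.get_context())):
    Process = NoDaemonProcess


class NestedPool(multiprocessing.pool.Pool):
    # subclass multiprocessing.pool.Pool (multiprocessing.Pool is only a factory function)
    def __init__(self, *args, **kwargs):
        kwargs['context'] = NoDaemonContext()
        super().__init__(*args, **kwargs)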
Example #7
def _run_next_graph_nodes(graph, node, globals_, locals_, pool):

    operator = graph.node[node].get('OPERATOR', None)

    nodes_return_value = []

    return_value = None

    # False? Terminate Flow.

    if isinstance(locals_['_'], bool) and locals_['_'] is False:

        return False

    if operator:

        #   -->  (a)
        #   --> / | \
        #    (b) (c) (d)
        #       \ | /
        #        (e)

        next_nodes = sorted(graph.successors(node))

        # N-1

        for next_node in next_nodes[1:]:

            # Synchronous

            if operator == '|':

                nodes_return_value.append(
                    pool.apply(_run,
                               args=(graph, next_node, globals_, locals_, {},
                                     None, False)))

            # Asynchronous

            if operator == '->':

                nodes_return_value.append(
                    pool.apply_async(_run,
                                     args=(graph, next_node, globals_, locals_,
                                           {}, None, False)))

        # 1

        nodes_return_value.insert(
            0, _run(graph, next_nodes[0], globals_, locals_, {}, None, False))

        pool.close()

        pool.join()

        pool.terminate()

        return_value = __resolve_and_merge_results(nodes_return_value)

    else:

        #        (a)
        #       / | \
        #    (b) (c) (d)
        #       \ | /
        #    --> (e)

        return_value = locals_['_']

    return return_value
Example #8
def _run_next_virtual_nodes(graph, node, globals_, locals_, flags, pool,
                            result):

    operator = graph.node[node].get('OPERATOR', None)

    return_value = []

    not_safe_to_iter = False

    is_head_result = True

    head_result = None

    # "Hello, world" or {...}

    if isinstance(result, (basestring, dict)) or not __isiter(result):

        not_safe_to_iter = True

    # [[1]]

    if isinstance(result, list) and len(result) == 1 and isinstance(
            result[0], list):

        result = result[0]

        not_safe_to_iter = True

    # More nodes ahead?

    if operator:

        if not_safe_to_iter:

            logging.debug('not_safe_to_iter is True for %s' % result)

            head_result = result

            tmp_globals = copy.copy(globals_)

            tmp_locals = copy.copy(locals_)

            tmp_globals['_'] = tmp_locals['_'] = head_result

            return_value = __resolve_and_merge_results(
                _run(graph, node, tmp_globals, tmp_locals, {}, None, True))

        else:

            # Originally this was implemented using result[0] and result[1:], but xrange() is not sliceable, so it was changed to a `for` loop that buffers the first result

            for res_value in result:

                logging.debug('Now at %s from %s' % (res_value, result))

                if is_head_result:

                    logging.debug('is_head_result is True for %s' % res_value)

                    is_head_result = False

                    head_result = res_value

                    tmp_globals = copy.copy(globals_)

                    tmp_locals = copy.copy(locals_)

                    tmp_globals['_'] = tmp_locals['_'] = head_result

                    return_value.insert(
                        0,
                        _run(graph, node, tmp_globals, tmp_locals, {}, None,
                             True))

                    continue

                tmp_globals = copy.copy(globals_)

                tmp_locals = copy.copy(locals_)

                tmp_globals['_'] = tmp_locals['_'] = res_value

                # Synchronous

                if operator == '|':

                    return_value.append(
                        pool.apply(_run,
                                   args=(graph, node, tmp_globals, tmp_locals,
                                         {}, None, True)))

                # Asynchronous

                if operator == '->':

                    return_value.append(
                        pool.apply_async(_run,
                                         args=(graph, node, tmp_globals,
                                               tmp_locals, {}, None, True)))

            pool.close()

            pool.join()

            pool.terminate()

            logging.debug('return_value = %s' % return_value)

            return_value = __resolve_and_merge_results(return_value)

    # Loopback

    else:

        # AS IS

        if not_safe_to_iter:

            return_value = [result]

        # Iterate for all possible *return values*

        else:

            for res_value in result:

                return_value.append(res_value)

            # Unbox

            if len(return_value) == 1:

                return_value = return_value[0]

    return return_value
Example #9
    def render_one(self, order: 'Order') -> Optional['Order']:
        with multiprocessing.pool.Pool(initializer=self.worker_init,
                                       processes=1) as pool:
            return pool.apply(self.prepare_order, (order, ))
Example #10
def run(args):
    '''Main function for ORF finding.
    '''
    # prepare
    global tisbampaths, tisoffdict, ribobampaths, riboffdict, genomefapath, compatible, compatiblemis
    global minaalen, enrichtest, slp, paras, verbose, alt, title, tis2ribo, gfilter
    global tpth, fpth, minpth, fspth, framebest, framelocalbest, longest, transprofile, TIS_types  #fspth
    global paired, seq, aaseq, blocks  # showtime
    paired, seq, aaseq, blocks = args.paired, args.seq, args.aaseq, args.blocks
    ribo.maxNH, ribo.minMapQ, ribo.secondary = args.maxNH, args.minMapQ, args.secondary
    tisbampaths = args.tisbampaths
    ribobampaths = args.ribobampaths
    if len(tisbampaths) == 0 and len(ribobampaths) == 0:
        print('No bam file input!')
        exit(1)
    genomefapath = args.genomefapath
    compatible = not args.nocompatible
    compatiblemis = args.compatiblemis
    minaalen = args.minaalen
    enrichtest = args.enrichtest
    transprofile = args.transprofile
    harrwidth = None
    TIS_types = [
        'Annotated', 'Truncated', 'Extended', "5'UTR", "3'UTR", 'Internal',
        'Novel'
    ]
    if args.chrmap is not None:
        chrmap = {}
        for lst in io.splitIter(args.chrmap):
            chrmap[lst[0]] = lst[1]
            chrmap[lst[1]] = lst[0]
        bam.chrmap = chrmap
        fa.chrmap = chrmap
    if args.harrwidth is not None: harrwidth = args.harrwidth
    elif args.harr: harrwidth = 15
    verbose = args.verbose
    alt = args.alt
    if args.altcodons is not None:
        alt = True
        if args.altcodons[0].upper() == 'ALL': orf.cstartlike = orf.allcodons
        else: orf.cstartlike = [c.upper() for c in args.altcodons]
    tpth, fpth, minpth, framebest, framelocalbest = args.tpth, args.fpth, args.minpth, args.framebest, args.framelocalbest  # fspth
    fspth = args.fspth
    longest = args.longest
    tis2ribo = args.tis2ribo
    parts = [0.1 * (i + 1) for i in range(args.nparts)]
    gfilter = None
    if args.genefilter is not None:
        gfilter = {}
        for gid in args.genefilter:
            gfilter[gid] = 1
    flank = 3  ##
    tisoffdict = find_offset(args.tisbampaths, args.tispara)
    riboffdict = find_offset(args.ribobampaths, args.ribopara)
    if len(args.ribobampaths) == 0:
        print(
            'No regular RiboSeq data input. TIS data will also be used as regular RiboSeq data.'
        )
        tis2ribo = True
    if len(args.tisbampaths) == 1:
        if args.inestpath is None:
            path = args.tisbampaths[0] + '.bgest.txt'
            if isfile(path): args.inestpath = path
            else: args.estpath = path
    if args.agenepath is None: args.agenepath = args.genepath

    # load genome, fasta file indexing
    if args.verbose: print("{} Loading genome...".format(time.ctime()))
    genome = fa.Fa(args.genomefapath, verbose=args.verbose)

    # TIS background estimation
    if len(args.tisbampaths) == 0:
        print('No input TIS data!')
        paras, slp = [(1, 0.5)], [1]  # No TIS input
    elif args.inestpath is None:  #== '' :
        print("{} Estimating TIS background parameters...".format(
            time.ctime()))
        if args.verbose:
            print(
                "TIS background estimation result will be saved to {}".format(
                    args.estpath))
        if args.numProc > 1:
            from multiprocessing import Process
            import multiprocessing.pool

            class NoDaemonProcess(Process):
                # make 'daemon' attribute always return False
                def _get_daemon(self):
                    return False

                def _set_daemon(self, value):
                    pass

                daemon = property(_get_daemon, _set_daemon)

            class MyPool(multiprocessing.pool.Pool):
                Process = NoDaemonProcess

            pool = MyPool(1)  # This is for memory efficiency
            paras, slp, data = pool.apply(ribo.estimateTISbg,
                                          args=(args.agenepath,
                                                args.tisbampaths,
                                                args.genomefapath),
                                          kwds={
                                              'parts': parts,
                                              'offdict': tisoffdict,
                                              'numProc': args.numProc,
                                              'verbose': args.verbose,
                                              'geneformat': args.geneformat,
                                              'harrwidth': harrwidth,
                                              'paired': paired
                                          })
            pool.close()
        else:
            paras, slp, data = ribo.estimateTISbg(args.genepath,
                                                  args.tisbampaths,
                                                  args.genomefapath,
                                                  parts=parts,
                                                  offdict=tisoffdict,
                                                  numProc=1,
                                                  verbose=verbose,
                                                  geneformat=args.geneformat,
                                                  harrwidth=harrwidth,
                                                  paired=paired)
        estfile = open(args.estpath, 'w')
        for i in range(len(parts)):
            estfile.write("{}\t{}\t{}\t{}\t{}\n".format(
                paras[i][0], paras[i][1], parts[i], slp[i], data[i]))
        estfile.close()

    else:
        inestfile = open(args.inestpath, 'r')
        paras, slp = [], []
        for l in inestfile:
            lst = l.strip().split('\t')
            paras.append((float(lst[0]), float(lst[1])))
            slp.append(eval(lst[3]))

    if args.inprofile is not None and not isfile(args.inprofile):
        print('inprofile {} not found!'.format(args.inprofile))
        if args.transprofile is None:
            transprofile = args.inprofile

    if args.numProc > 1:
        from multiprocessing import Pool
        pool = Pool(processes=args.numProc - 1)

    cds_regions = {}
    known_tis = {}
    if args.agenepath != args.genepath:
        if verbose: print('Loading CDS annotation...')
        for g in io.geneIter(args.agenepath,
                             fileType=args.geneformat,
                             chrs=genome.idx,
                             verbose=args.verbose):
            if g.chr not in cds_regions:
                cds_regions[g.chr] = {
                    '+': [interval.Interval() for i in range(3)],
                    '-': [interval.Interval() for i in range(3)]
                }
                known_tis[g.chr] = {'+': {}, '-': {}}
            for t in g.trans:
                cr = interval.cds_region_trans(t)
                for i in range(3):
                    cds_regions[t.chr][t.strand][i].lst += cr[i].lst
            #for t in g.trans :
                tis = t.cds_start(cdna=False)
                if tis is not None: known_tis[t.chr][t.strand][tis] = 1

    inorf = None
    if args.input is not None:
        if verbose: print('Loading candidates...')
        inorf = {}
        infile = open(args.input, 'r')
        for l in infile:
            lst = l.strip().split()
            tid, tis, stop = lst[0], int(lst[1]), int(lst[2])
            #if gfilter is not None and tid not in gfilter : continue
            if tid not in inorf: inorf[tid] = []
            inorf[tid].append([tis, stop])
    inprofile = None
    if args.inprofile is not None:
        if isfile(args.inprofile):
            if verbose: print('Loading transcript profile...')
            inprofile = {}
            for lst in io.splitIter(args.inprofile):
                try:
                    gid, tid, tispf, ribopf = lst[0], lst[1], eval(
                        lst[3]), eval(lst[4])
                except:
                    continue
                if gid not in inprofile: inprofile[gid] = {}
                inprofile[gid][tid] = tispf, ribopf

    print("{} Predicting...".format(time.ctime()))
    profile = exp.Profile()
    title = ['TISGroup', 'TISCounts', 'TISPvalue', 'RiboPvalue', 'RiboPStatus']
    j = [0, 0]  # total number of ORF/TIS for BH correction
    gene_iter = io.geneIter(args.genepath,
                            fileType=args.geneformat,
                            chrs=genome.idx,
                            verbose=args.verbose)
    para_iter = genePara(gene_iter, inorf, inprofile)
    if args.numProc <= 1: pred_iter = itertools.imap(_pred_gene, para_iter)
    else:
        #from multiprocessing import Pool
        #pool = Pool(processes = args.numProc - 1)
        pred_iter = pool.imap_unordered(_pred_gene, para_iter, chunksize=5)
    if transprofile is not None:
        tpfile = open(transprofile, 'w')
        tpfile.write('Gid\tTid\tSymbol\tTISProf\tRiboProf\n')

    for result in pred_iter:
        es, ji, tpfs, g = result
        j[0] += ji[0]
        j[1] += ji[1]
        for e in es:
            profile.add_exp(e)
            if verbose >= 2: print('{} {}'.format(time.ctime(), str(e)))
        if transprofile is not None:
            for tid in tpfs:
                tpfile.write(io.tabjoin(tid, tpfs[tid]) + '\n')
        if g.chr not in cds_regions:
            cds_regions[g.chr] = {
                '+': [interval.Interval() for i in range(3)],
                '-': [interval.Interval() for i in range(3)]
            }
            known_tis[g.chr] = {'+': {}, '-': {}}
        for t in g.trans:
            cr = interval.cds_region_trans(t)
            for i in range(3):
                cds_regions[t.chr][t.strand][i].lst += cr[i].lst
        #for t in g.trans :
            tis = t.cds_start(cdna=False)
            if tis is not None: known_tis[t.chr][t.strand][tis] = 1

    for chr in cds_regions:
        for strand in cds_regions[chr]:
            for i in range(3):
                cds_regions[chr][strand][i].check()
    print("{} Checking overlap with known CDS..".format(time.ctime()))
    for e in profile:
        if e.tistype == 0: continue
        elif e.gtis in known_tis[e.chr][e.strand]: e.id += ':Known'
        elif e.tistype > 1:  # ["5'UTR", "3'UTR", "Inside", "Novel", 'Extended'] :
            #coding_overlap = False
            for i in range(3):
                its = cds_regions[e.chr][e.strand][i].intersect(
                    e.cr[i]
                )  # e.cr[i].intersect(cds_regions[e.chr][e.strand][i])
                if its.rlen() > 0:
                    #coding_overlap = True
                    e.id += ':CDSFrameOverlap'
                    break

    print("{} BH correcting...".format(time.ctime()))
    profile.BHcorrection(2, total=j[1],
                         append=True)  # Calculate BH FDR of TIS p value
    profile.BHcorrection(3, total=j[0], append=True)  # Frame p value
    i = 1
    if len(tisbampaths) == 0: i = 0
    profile.BHcorrection(5, total=j[i],
                         append=True)  # Calculate BH FDR for Fisher's p value

    outfile = open(args.output, 'w')
    s = "Gid\tTid\tSymbol\tGeneType\tGenomePos\tStartCodon\tStart\tStop\tTisType\t"
    s += '\t'.join(title)
    s += '\tFisherPvalue\tTISQvalue\tFrameQvalue\tFisherQvalue\tAALen'
    if seq: s += '\tSeq'
    if aaseq: s += '\tAASeq'
    if blocks: s += '\tBlocks'
    s += '\n'
    outfile.write(s)

    if args.allresult is not None and args.allresult.upper() == 'OFF':
        allout = None
    elif args.fsqth == 1:
        allout = None
    else:
        if args.allresult is None:
            lst = args.output.split('.')
            if lst[-1] == 'txt': args.allresult = args.output[:-4] + '_all.txt'
            else: args.allresult = args.output + '_all.txt'
        allout = open(args.allresult, 'w')
        allout.write(s)

    for e in profile:
        #if e.q > args.fsqth : continue
        if len(tisbampaths) == 0:
            e.data[5], e.data[8] = None, None  # No Fisher's
        s = "%s\t%d" % (e, e.length)
        if seq: s += '\t' + e.sq
        if aaseq: s += '\t' + e.aa
        if blocks: s += '\t' + e.blocks
        s += '\n'
        if allout is not None: allout.write(s)
        if e.q <= args.fsqth:
            outfile.write(s)  # "%s\t%d\n" % (e, e.length)) #, e.sq))
Example #11
    def find_rheobase(self, model, hyperpol_amp, delay, dur, section_stim,
                      loc_stim, section_rec,
                      loc_rec):  #to find the rheobase current

        print('Finding rheobase current...')

        upper_bound = 1.0
        lower_bound = 0.0

        amps = numpy.arange(lower_bound, upper_bound, 0.1)

        message_to_logFile = ''  # as it can not be open here because then  multiprocessing won't work under python3

        for amp in amps:
            # trace = run_cclamp_on_soma(self, model, amp = amp, delay=delay, dur=dur, section_stim=section_stim, loc_stim=loc_stim, section_rec=section_rec, loc_rec=loc_rec)
            pool = multiprocessing.Pool(
                1, maxtasksperchild=1
            )  # I use multiprocessing to keep every NEURON related task in independent processes

            trace = pool.apply(
                self.run_cclamp_on_soma,
                args=(model, amp, hyperpol_amp, delay, dur, section_stim,
                      loc_stim, section_rec,
                      loc_rec))  # run_cclamp_on_soma_hyperpol should be used here
            pool.terminate()
            pool.join()
            del pool

            spikecount = self.spikecount(delay, dur, trace)

            if amp == upper_bound and spikecount == 0:

                message_to_logFile += 'The model didn\'t fire even at ' + str(
                    upper_bound) + ' nA current step\n'
                message_to_logFile += "---------------------------------------------------------------------------------------------------\n"

                print('The model didn\'t fire even at ' + str(upper_bound) +
                      ' nA current step')
                amplitude = None

            if spikecount > 0:
                upper_bound = amp
                break

        # binary search

        depth = 1
        max_depth = 5
        precision = 0.005
        while depth < max_depth or abs(upper_bound - lower_bound) > precision:
            print('ciklus')  # debug print; 'ciklus' is Hungarian for 'cycle'
            middle_bound = upper_bound - abs(upper_bound - lower_bound) / 2.0

            pool = multiprocessing.Pool(
                1, maxtasksperchild=1
            )  # I use multiprocessing to keep every NEURON related task in independent processes

            trace = pool.apply(self.run_cclamp_on_soma,
                               args=(model, middle_bound, hyperpol_amp, delay,
                                     dur, section_stim, loc_stim, section_rec,
                                     loc_rec))
            pool.terminate()
            pool.join()
            del pool

            spikecount = self.spikecount(delay, dur, trace)
            if spikecount == 0:
                lower_bound = middle_bound
            else:
                upper_bound = middle_bound
            depth = depth + 1

        message_to_logFile += 'Rheobase current: ' + str(upper_bound) + ' nA\n'
        message_to_logFile += "---------------------------------------------------------------------------------------------------\n"

        return upper_bound, message_to_logFile
Example #12
    def test_conflict_errors(self):
        """
        Here we realize that conflict errors occur only occur when concurrent modifications
        on a particular container (with specific oid) occur concurrently. Updates can still
        be lost if a branch of the object tree is disconnected from the root while one of 
        its leaves gets updated.
        
        Similarly, readCurrent() only protects a specific container of the object tree,
        which can still be disconnected from the root by a transaction, while its content
        is updated by another transaction.
        """

        conn = self.conn

        root = conn.root

        root.stuff = PersistentList([9])
        root.origin = PersistentList([3])
        root.target = PersistentList([8])
        root.dummy1 = PersistentList([9])
        transaction.commit()

        # basic conflict on root #

        pool.apply(delete_container, args=(self.db, "dummy1"))

        root.dummy2 = 5

        self.assertRaises(ConflictError, transaction.commit)  # conflict !!
        self.assertRaises(TransactionFailedError, transaction.commit)  # transaction broken

        transaction.abort()

        self.assertFalse(hasattr(root, "dummy2"))  # rolled back

        # no conflict when a branch gets detached while leaf is updated

        container = root.stuff

        pool.apply(delete_container, args=(self.db, "stuff"))

        container[0] = 88

        transaction.commit()

        self.assertFalse(hasattr(root, "stuff"))  # update lost

        # without readCurrent() - lost update #

        root.origin = PersistentList([13])
        value = root.origin

        pool.apply(transfer_list_value, args=(self.db, "origin", "target"))

        root.target = value

        transaction.commit()

        self.assertEqual(root.target, PersistentList([13]))  # we lost [3]

        # with readCurrent() and container update - ReadConflictError raised! #


        root.origin = PersistentList([17])
        transaction.commit()

        res = conn.readCurrent(root.target)  # container object selected !!
        assert res is None  # no return value expected

        value = root.target

        pool.apply(transfer_list_value, args=(self.db, "origin", "target"))

        root.othertarget = value

        self.assertRaises(Exception, transaction.commit)

        self.assertEqual(root.target, PersistentList([17]))  # auto refreshing occurred
        self.assertFalse(hasattr(root, "othertarget"))  # auto refreshing occurred

        self.assertRaises(Exception, transaction.commit)  # but transaction still broken

        transaction.abort()
        transaction.commit()  # now all is ok once again

        # with readCurrent() and container deletion - somehow lost update! #

        value = root.origin[0]

        res = conn.readCurrent(root.origin)  # container object selected !!
        assert res is None  # no return value expected

        pool.apply(delete_container, args=(self.db, "origin"))

        root.target[0] = value  # we use a value whose origin has now been deleted in other thread

        transaction.commit()  # here it's OK, the deleted object still remains in the DB history even if unreachable
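pool, delete_container and transfer_list_value are defined outside this excerpt. They presumably run the competing modification in a separate worker with its own database connection, which is what makes its commit race with the test's transaction; a rough sketch of what delete_container could look like, offered purely as an assumption about the elided fixtures:

# Hypothetical helper -- an assumption, not the original test fixture.
import transaction

def delete_container(db, name):
    # open an independent connection with its own transaction manager,
    # so this commit competes with the one in the test thread
    tm = transaction.TransactionManager()
    conn = db.open(transaction_manager=tm)
    delattr(conn.root, name)
    tm.commit()
    conn.close()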
Example #13
def some_thread():
    while True:
        r = pool.apply(some_work, (args,))
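The fragment assumes pool, some_work, and args exist at module level. A minimal, self-contained sketch of that context, with placeholder definitions that are assumptions rather than the original code:

# Hypothetical scaffolding around the fragment -- all names below are placeholders.
import multiprocessing
import threading

def some_work(x):
    return x * x

def some_thread():
    while True:
        r = pool.apply(some_work, (args,))
        print(r)
        break  # placeholder: run a single iteration for the demo

if __name__ == '__main__':
    args = 7
    pool = multiprocessing.Pool(processes=2)
    t = threading.Thread(target=some_thread)
    t.start()
    t.join()
    pool.close()
    pool.join()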