def line_integration(rmid):
    print("Begin process for " + str(rmid))
    mjd_list = map(int, os.listdir(Location.project_loca + "data/raw/" +
                                   str(rmid)))
    os.chdir(Location.project_loca + "/result/flux_of_line/")
    try:
        os.mkdir(str(rmid))
    except OSError:
        pass
    pool = Pool(processes=32)
    m = Manager()
    lock = m.Lock()
    fe2dic = m.dict()
    hbetandic = m.dict()
    hbetabdic = m.dict()
    o3dic = m.dict()
    contdic = m.dict()
    func = partial(line_integration_single, rmid, lock, fe2dic, hbetandic,
                   hbetabdic, o3dic, contdic)
    pool.map(func, mjd_list)
    output_flux(rmid, dict(fe2dic), "Fe2")
    output_flux(rmid, dict(hbetandic), "Hbetan")
    output_flux(rmid, dict(hbetabdic), "Hbetab")
    output_flux(rmid, dict(contdic), "cont")
    output_flux(rmid, dict(o3dic), "O3")
    pool.close()
    pool.join()
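
The example above hands a Manager dict and Lock to Pool workers by binding them with functools.partial; here is a minimal, self-contained sketch of that pattern (all names below are illustrative, not taken from the example):

from functools import partial
from multiprocessing import Pool, Manager

def square_into(results, lock, x):
    # the lock only matters for read-modify-write; plain assignment to a
    # proxy dict is already serialized by the manager process
    with lock:
        results[x] = x * x

if __name__ == "__main__":
    with Manager() as m, Pool(4) as pool:
        results = m.dict()
        lock = m.Lock()
        pool.map(partial(square_into, results, lock), range(10))
        print(dict(results))   # copy the proxy into a plain dict before the manager shuts down
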
def build_av_tf_idf_dv(docs, doc_num, model, save=True, save_file="doc_vector_tfidf.bin"):
    docs = list(docs)
    vectorizer = CountVectorizer()
    tfidf_transformer = TfidfTransformer()
    count_fv = vectorizer.fit_transform(util.word2sentence(docs))
    tfidf_fv = tfidf_transformer.fit_transform(count_fv)

    num_features = model.syn0.shape[1]

    manager = Manager()
    global_word_set = manager.dict(util.get_word_vec_dict(model))
    global_vocabulary = manager.dict(vectorizer.vocabulary_)
    global_doc_vector = mp.Array('d', doc_num*num_features, lock=False)

    pool = mp.Pool(initializer=initprocess, initargs=[global_doc_vector])

    index = 0
    # test(docs[0], global_word_set, 0, doc_num, global_vocabulary, global_doc_vector, global_tfidf_fv)
    for words in docs:
        pool.apply_async(single_av_tf_idf_dv, [words, global_word_set, index, doc_num, global_vocabulary, tfidf_fv[index]])
        index += 1

    pool.close()
    pool.join()

    doc_vector = np.frombuffer(global_doc_vector).reshape((doc_num, num_features))
    if save:
        np.save(save_file, doc_vector)
    return doc_vector
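
A reduced sketch of the shared-array idea used above: a lock-free multiprocessing.Array handed to Pool workers through an initializer, then read back with numpy.frombuffer. It assumes a start method under which the raw array reaches the workers (e.g. fork on Linux), and every name is a placeholder:

import multiprocessing as mp
import numpy as np

_shared = None

def _init(shared):
    global _shared
    _shared = shared                      # each worker keeps the raw array

def fill_row(args):
    row, width = args
    for col in range(width):
        _shared[row * width + col] = row + col / 10.0

if __name__ == "__main__":
    rows, cols = 4, 3
    shared = mp.Array('d', rows * cols, lock=False)
    with mp.Pool(2, initializer=_init, initargs=(shared,)) as pool:
        pool.map(fill_row, [(r, cols) for r in range(rows)])
    print(np.frombuffer(shared).reshape(rows, cols))
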
Example #3
def run():
    # build the mdp
    start = time.time()
    room_size = 3
    num_rooms = 5
    mdp = maze_mdp.MazeMDP(room_size=room_size, num_rooms=num_rooms)

    # build the agent
    m = Manager()
    init_dict = {(s, a): 0 for s in mdp.states for a in mdp.ACTIONS + [None]}
    shared_weights = m.dict(init_dict)
    shared_value_weights = m.dict(init_dict)
    agent = async_actor_critic.AsyncActorCritic(actions=mdp.ACTIONS, discount=mdp.DISCOUNT, 
        weights=shared_weights, value_weights=shared_value_weights, tau=.3, learning_rate=.5)

    # build a single experiment
    rewards = m.list()
    start_state_values = m.list()
    max_steps = (2 * room_size * num_rooms) ** 2
    exp = experiment.Experiment(mdp=mdp, agent=agent, num_episodes=800, max_steps=max_steps,
        rewards=rewards, start_state_values=start_state_values)

    # run the experiment
    multiexperiment = experiment.MultiProcessExperiment(experiment=exp, num_agents=NUM_PROCESSES)
    multiexperiment.run()

    # report results
    end = time.time()
    print 'took {} seconds to converge'.format(end - start)
    mdp.print_state_values(shared_value_weights)
    optimal = mdp.EXIT_REWARD + (2 * room_size * num_rooms * mdp.MOVE_REWARD)
    utils.plot_values(rewards, optimal, 'rewards')
    utils.plot_values(start_state_values, optimal, 'start state value')
def process_job_parallel(scheduler, job, nr_cores, nr_iter, parameters = None):
    Logger.log_level = 2
    processes = []
    manager = Manager()
    return_values = manager.dict()
    extremes = manager.dict()
    start_time = datetime.datetime.now()
    for i in range(nr_cores):
        p = Process(target=worker, args=(i, nr_cores, scheduler, job, nr_iter, return_values, extremes, parameters,))
        processes.append(p)
        p.start()

    for process in processes:
        process.join()

    #reduce
    results = []
    for value in return_values.values():
        for entry in value:
            results.append(entry)

    min = None
    max = None

    for extreme in extremes.values():
        if min is None or extreme[0].total_time < min.total_time:
            min = extreme[0]
        if max is None or extreme[1].total_time > max.total_time:
            max = extreme[1]
    Logger.warning("Min: %s" % min.total_time)
    Logger.warning("Max: %s" % max.total_time)

    duration = datetime.datetime.now() - start_time
    Logger.warning("Simulation  complete. Duration: %s" % (duration))
    return results, (min,max)
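
A bare-bones sketch of the fork/join-then-reduce shape used above, with placeholder worker logic:

from multiprocessing import Process, Manager

def worker(rank, nr_cores, return_values):
    # each worker handles the work items congruent to its rank
    return_values[rank] = [i * i for i in range(rank, 20, nr_cores)]

if __name__ == "__main__":
    nr_cores = 4
    manager = Manager()
    return_values = manager.dict()
    processes = [Process(target=worker, args=(i, nr_cores, return_values))
                 for i in range(nr_cores)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    # reduce: flatten the per-worker lists into a single result list
    results = [entry for value in return_values.values() for entry in value]
    print(sorted(results))
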
 def __init__(self, config):
     '''*config* can be obtained from the function :func:`cloudfusion.store.sugarsync.sugarsync_store.SugarsyncStore.get_config`,
     but you need to add user and password::
     
         config = SugarsyncStore.get_config()
         config['user'] = '******' #your account username/e-mail address
         config['password'] = '******' #your account password
     
     Or you can use a configuration file that already has password and username set by specifying a path::
     
         path_to_my_config_file = '/home/joe/MySugarsync.ini'       
         config = get_config(path_to_my_config_file)
     
     :param config: dictionary with key value pairs'''
     #self.dir_listing_cache = {}
     self._logging_handler = 'sugarsync'
     self.logger = logging.getLogger(self._logging_handler)
     self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
     manager = Manager()
     self.path_cache = manager.dict()
     # use a lock for synchronized appends
     self._dir_listing_cache = manager.dict()
     self._dir_listing_cache_lock = RLock()
     self._last_partial_cache = manager.list()
     self._time_of_last_partial_cache = 0
     #error handling for authorization error
     self.root = config["root"]
     try:
         self.client = SugarsyncClient(config)
     except Exception, e:
         raise StoreAutorizationError(repr(e), 0)
def main_parallel():
    Component.resetPfKeeping()
    Component.resetCostKeeping()

    manager = Manager()
    Component.pfkeeping = manager.dict(Component.pfkeeping)
    Component.costkeeping = manager.dict(Component.costkeeping)

    pool = Pool(processes=3)
    toolbox.register("map", pool.map)

    print "MULTIOBJECTIVE OPTIMIZATION: parallel version"
    start_delta_time = time.time()

    # optimization
    random.seed(64)

    npop = 100
    ngen = 50

    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "avg", "std", "min", "max"

    pop = toolbox.population(n=npop)
    fits = toolbox.map(toolbox.evaluate, pop)
    for fit,ind in zip(fits, pop):
        ind.fitness.values = fit

    nevals = npop
    allpop = []
    for gen in range(ngen):
        allpop = allpop+pop
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=nevals, **record)
        print(logbook.stream)

        offspring = algorithms.varOr(pop, toolbox, lambda_=npop, cxpb=0.5, mutpb=0.1)
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        nevals = len(invalid_ind)
        fits = toolbox.map(toolbox.evaluate, invalid_ind)
        for fit,ind in zip(fits, invalid_ind):
            ind.fitness.values = fit
        pop = toolbox.select(offspring+pop, k=npop)

    front = toolbox.sort(allpop, k=int(ngen*npop), first_front_only=True)

    pool.close()
    pool.join()

    delta_time = time.time() - start_delta_time
    print 'DONE: {} s'.format(str(datetime.timedelta(seconds=delta_time)))

    return allpop, logbook, front
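
The key trick above is registering pool.map as the toolbox's map so fitness evaluation runs in parallel; a stripped-down sketch of that substitution without DEAP (evaluate is a placeholder fitness function returning a 1-tuple, as DEAP expects):

from multiprocessing import Pool

def evaluate(individual):
    return (sum(x * x for x in individual),)   # DEAP-style fitness tuple

if __name__ == "__main__":
    population = [[i, i + 1, i + 2] for i in range(8)]
    with Pool(3) as pool:
        fitnesses = pool.map(evaluate, population)   # drop-in replacement for map()
    for ind, fit in zip(population, fitnesses):
        print(ind, fit)
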
Example #7
def main():
	parser = argparse.ArgumentParser(description='Takes deduplicated bam files and preprocesses them for analysis\n')
	parser.add_argument('-c', '--config', help='Conditions containing Sam/Bam files, values are naming', required=True)
	parser.add_argument('-g', '--genome', help='Genome the samples are aligned to, options include mm10/mm9/hg19', required=True)
	parser.add_argument('-o', '--outdir', help='Output directory, will create transdense, nfree and npres directories', required=True)
	parser.add_argument('-t', '--threads', help='threads, default=1', default=1, required=False)
	parser.add_argument('-b', action='store_true', help='Use if Config contains bam files', required=False) 
	parser.add_argument('-n', action='store_true', help='Runs just nfree <60 and >60', required=False) 
	if len(sys.argv)==1:
		parser.print_help()
		sys.exit(1)
	args = vars(parser.parse_args())

	Config = ConfigParser.ConfigParser()
	Config.optionxform = str
	Config.read(args["config"])
	conditions = ConfigSectionMap("Conditions", Config)

	chrom = pkg_resources.resource_filename('pyatactools', 'data/{}.chrom.sizes'.format(args["genome"]))
	if not os.path.isfile(chrom):
		raise Exception("Unsupported Genome!")

	transdense_dir = os.path.join(args["outdir"], "transdense")
	nfree_dir = os.path.join(args["outdir"], "nfree")
	npres_dir = os.path.join(args["outdir"], "npres")
	pool = Pool(int(args["threads"]))

	if not os.path.isdir(transdense_dir):
		os.makedirs(transdense_dir)
		os.makedirs(nfree_dir)
		os.makedirs(npres_dir)
	
	ddup_bams = list(conditions.keys())
	if args["n"]:
		manager = Manager()
		return_dict = manager.dict()
		pool = Pool(int(args["threads"]))
		nfree_dir1 = os.path.join(args["outdir"], "nfree_small")
		nfree_dir2 = os.path.join(args["outdir"], "nfree_large")
		if not os.path.isdir(nfree_dir1):
			os.makedirs(nfree_dir1)
			os.makedirs(nfree_dir2)
		pool.map(function5, itertools.izip(ddup_bams, itertools.repeat(nfree_dir1),itertools.repeat(nfree_dir2), itertools.repeat(return_dict)))
		pool.map(function4, itertools.izip(list(return_dict.keys()), itertools.repeat(chrom)))
	else:
		manager = Manager()
		return_dict = manager.dict()
		pool = Pool(int(args["threads"]))
		pool.map(function1, itertools.izip(ddup_bams, itertools.repeat(transdense_dir), itertools.repeat(return_dict)))
		pool.map(function4, itertools.izip(list(return_dict.keys()), itertools.repeat(chrom)))
		return_dict = manager.dict()
		pool.map(function2, itertools.izip(ddup_bams, itertools.repeat(nfree_dir), itertools.repeat(return_dict)))
		pool.map(function4, itertools.izip(list(return_dict.keys()), itertools.repeat(chrom)))
		return_dict = manager.dict()
		pool.map(function3, itertools.izip(ddup_bams, itertools.repeat(npres_dir), itertools.repeat(return_dict)))
		pool.map(function4, itertools.izip(list(return_dict.keys()), itertools.repeat(chrom)))
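
The calls above pack the per-file arguments into tuples with itertools.izip so that pool.map can pass them through a single parameter; a small Python 3 sketch of the same idea (zip instead of izip, with a placeholder worker and paths):

import itertools
from multiprocessing import Pool, Manager

def annotate(args):
    bam, outdir, return_dict = args                  # unpack the single packed tuple
    return_dict[bam] = "{}/{}.out".format(outdir, bam)

if __name__ == "__main__":
    bams = ["a.bam", "b.bam", "c.bam"]
    with Manager() as m, Pool(2) as pool:
        return_dict = m.dict()
        pool.map(annotate,
                 zip(bams, itertools.repeat("/tmp"), itertools.repeat(return_dict)))
        print(dict(return_dict))
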
Example #8
def launch_multiprocess(launchpad, fworker, loglvl, nlaunches, num_jobs, sleep_time,
                        total_node_list=None, ppn=1, timeout=None, exclude_current_node=False,
                        local_redirect=False):
    """
    Launch the jobs in the job packing mode.

    Args:
        launchpad (LaunchPad)
        fworker (FWorker)
        loglvl (str): level at which to output logs
        nlaunches (int): 0 means 'until completion', -1 or "infinite" means to loop forever
        num_jobs(int): number of sub jobs
        sleep_time (int): secs to sleep between rapidfire loop iterations
        total_node_list ([str]): contents of NODEFILE (doesn't affect execution)
        ppn (int): processors per node (doesn't affect execution)
        timeout (int): # of seconds after which to stop the rapidfire process
        exclude_current_node: Don't use the script launching node as a compute node
        local_redirect (bool): redirect standard input and output to local file
    """
    # parse node file contents
    if exclude_current_node:
        host = get_my_host()
        l_dir = launchpad.get_logdir() if launchpad else None
        l_logger = get_fw_logger('rocket.launcher', l_dir=l_dir, stream_level=loglvl)
        if host in total_node_list:
            log_multi(l_logger, "Remove the current node \"{}\" from compute node".format(host))
            total_node_list.remove(host)
        else:
            log_multi(l_logger, "The current node is not in the node list, keep the node list as is")
    node_lists, sub_nproc_list = split_node_lists(num_jobs, total_node_list, ppn)

    # create shared dataserver
    ds = DataServer.setup(launchpad)
    port = ds.address[1]

    manager = Manager()
    running_ids_dict = manager.dict()
    firing_state_dict = manager.dict()

    # launch rapidfire processes
    processes = start_rockets(fworker, nlaunches, sleep_time, loglvl, port, node_lists,
                              sub_nproc_list, timeout=timeout, running_ids_dict=running_ids_dict,
                              local_redirect=local_redirect, firing_state_dict=firing_state_dict)
    FWData().Running_IDs = running_ids_dict
    FWData().FiringState = firing_state_dict

    # start pinging service
    ping_stop = threading.Event()
    ping_thread = threading.Thread(target=ping_multilaunch, args=(port, ping_stop))
    ping_thread.start()

    # wait for completion
    for p in processes:
        p.join()
    ping_stop.set()
    ping_thread.join()
    ds.shutdown()
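
A reduced sketch of the shutdown choreography above, keeping only the shared Manager dict, the worker processes, and the ping thread stopped through a threading.Event; it uses no FireWorks objects and all names are illustrative:

import threading
import time
from multiprocessing import Process, Manager

def rocket(firing_state, i):
    firing_state[i] = "done"

def ping(stop_event):
    while not stop_event.is_set():     # stand-in for the periodic ping service
        time.sleep(0.1)

if __name__ == "__main__":
    manager = Manager()
    firing_state = manager.dict()
    stop = threading.Event()
    pinger = threading.Thread(target=ping, args=(stop,))
    pinger.start()
    procs = [Process(target=rocket, args=(firing_state, i)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    stop.set()
    pinger.join()
    print(dict(firing_state))
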
Example #9
def plot_utrs(conditions, rev_conds, outdir):
	pool = Pool(24)
	manager = Manager()
	return_dict = manager.dict()
	genes = manager.dict()
	pool.map(function1, itertools.izip(list(conditions.keys()), itertools.repeat(outdir), itertools.repeat(return_dict), itertools.repeat(genes)))
	combined_profiles = {}
	normal = {}
	#Have to add all conditions together
	today = date.today()
	date_format = "{}_{}_{}".format(today.day, today.month, today.year)
	pp = PdfPages("{}/{}_UTR_averaged.pdf".format(outdir, date_format))

	#First need to average over all genes:
	averaged_profiles = {}
	len_genes = {}
	for key1, key2 in genes.keys(): #Chromosome, bam
		if key2 not in len_genes:
			len_genes[key2] = {}
			len_genes[key2][key1] = 1
		else:
			len_genes[key2][key1] = 1

	for key in return_dict.keys():
		averaged_profiles[key] = return_dict[key]/len(len_genes[key].keys())
	
	for key in rev_conds:
		fig = pyplot.figure()
		pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green"])
		for fasta in rev_conds[key]:
			name = re.sub(".fa", "", fasta)
			name = os.path.basename(name)
			uniq_count = read_reports('{}/{}_report.txt'.format(outdir, name))
			normal[fasta] = uniq_count
			norm = 100000/float(uniq_count)
			normalised_profile = norm * averaged_profiles[fasta]
			if key not in combined_profiles:
				combined_profiles[fasta] = normalised_profile
			else:
				combined_profiles[fasta] += normalised_profile

			pyplot.plot( numpy.arange( -200, 200 ), normalised_profile, label=name)
		pyplot.legend(prop={'size':6})
		pyplot.title(key)  
		pp.savefig(fig)
		pyplot.close()
	fig = pyplot.figure()
	pyplot.rc('axes', color_cycle=['b','r', 'c', 'm', 'y', 'k', 'gray', "green"])
	for key in combined_profiles:
		name = re.sub(".fa", "", key)
		name = os.path.basename(name)
		pyplot.plot( numpy.arange( -200, 200 ), combined_profiles[key], label=name)  
	pyplot.legend(prop={'size':6})
	pp.savefig(fig)
	pp.close()
Example #10
File: F1.py Project: gisce/libCNMC
    def __init__(self, **kwargs):
        """
        F1 class constructor

        :param codi_r1: R1 code of the company
        :type codi_r1: str
        :param year: Year of the resolution
        :type year: int
        """

        super(F1, self).__init__(**kwargs)
        self.codi_r1 = kwargs.pop('codi_r1')
        self.year = kwargs.pop('year', datetime.now().year - 1)
        manager = Manager()
        self.cts = manager.dict()
        self.cnaes = manager.dict()
        self.base_object = 'CUPS'
        self.report_name = 'F1 - CUPS'
        self.reducir_cups = kwargs.get("reducir_cups", False)
        mod_all_year = self.connection.GiscedataPolissaModcontractual.search([
            ("data_inici", "<=", "{}-01-01".format(self.year)),
            ("data_final", ">=", "{}-12-31".format(self.year))],
            0, 0, False, {"active_test": False}
        )
        mods_ini = self.connection.GiscedataPolissaModcontractual.search(
            [("data_inici", ">=", "{}-01-01".format(self.year)),
            ("data_inici", "<=", "{}-12-31".format(self.year))],
            0, 0, False, {"active_test": False}
        )
        mods_fi = self.connection.GiscedataPolissaModcontractual.search(
            [("data_final", ">=", "{}-01-01".format(self.year)),
            ("data_final", "<=", "{}-12-31".format(self.year))],
            0, 0, False, {"active_test": False}
        )
        self.modcons_in_year = set(mods_fi + mods_ini + mod_all_year)
        self.default_o_cod_tfa = None
        self.default_o_cnae = None
        search_params = [
            ('name', '=', 'libcnmc_4_2015_default_f1')
        ]
        id_config = self.connection.ResConfig.search(
            search_params
        )

        self.generate_derechos = kwargs.pop("derechos", False)

        if id_config:
            config = self.connection.ResConfig.read(id_config[0], [])
            default_values = literal_eval(config['value'])
            if default_values.get('o_cod_tfa'):
                self.default_o_cod_tfa = default_values.get('o_cod_tfa')
            if default_values.get('o_cnae'):
                self.default_o_cnae = default_values.get('o_cnae')
Example #11
    def _get(self, args):
        draft_id = args[0]
        id = args[1] if len(args) > 1 else None

        q = self.db.query(Player)
        if id is not None:
            player = q.filter(Player.id == int(id)).first()
            team = self.db.query(Team).filter(and_(Team.is_owner == True,
                                                   Team.draft_id == draft_id)).first()

            available_players = self.db.query(Player).join(Player.core).filter(and_(PlayerCore.rank != None,
                                                                                    PlayerCore.target_price != None,
                                                                                    PlayerCore.points > 0,
                                                                                    Player.draft_id == draft_id,
                                                                                    Player.team_id == None,
                                                                                    Player.id != player.id)).order_by(PlayerCore.rank).all()

            min_price = 1
            max_price = min(player.core.target_price + 21, team.money)
            manager = Manager()
            max_starters_points = manager.dict()
            max_bench_points = manager.dict()
            pool = Pool(processes=8)
            starters, bench = get_starters_and_bench(self.db, team.id)
            max_starters_points[0] = optimizer.optimize_roster(starters, available_players, team.money - (constants.BENCH_SIZE - len(bench)))[1]
            for m in range(min_price, 10):
                pool.apply_async(wrap_optimizer, args=(starters, available_players, team.money - m - (constants.BENCH_SIZE - len(bench)) + 1, max_bench_points, m))

            full_starters = True
            for s in starters:
                if s is None:
                    full_starters = False
            if not full_starters:
                starters_clone = list(starters)
                bench_clone = list(bench)
                place_player(player, starters_clone, bench_clone)
                for m in range(min_price, max_price):
                    pool.apply_async(wrap_optimizer, args=(starters_clone, available_players, team.money - m - (constants.BENCH_SIZE - len(bench_clone)), max_starters_points, m))

            pool.close()
            pool.join()

            ret = player.to_dict(['core'])
            ret['max_starters_points'] = dict(max_starters_points)
            ret['max_bench_points'] = dict(max_bench_points)

            return ret
        else:
            players = q.join(PlayerCore).filter(and_(Player.draft_id == int(draft_id),
                                                     PlayerCore.rank != None,
                                                     PlayerCore.target_price != None)).all()
            return {'players': [p.to_dict(['core']) for p in players]}
Example #12
 def __init__(self, **kwargs):
     super(CreateCelles, self).__init__(**kwargs)
     self.header = [
         'name', 'tipus_element', 'installacio', 'tipus_posicio',
         'inventari', 'aillament', 'cini', 'propietari', 'perc_financament',
         'tensio'
     ]
     self.search_keys = [('name')]
     self.fields_read_ct = ['perc_financament', 'propietari']
     self.fields_read_at_tram = ['perc_financament']
     self.object = self.connection.GiscedataCellesCella
     manager = Manager()
     self.cts = manager.dict()
     self.at_suports = manager.dict()
Example #13
def parallel_peak_file_plot(ibams, bed_file, size_dict, halfwinwidth, norm, controls):
	positions = set()
	for line in open(bed_file):
		fields = line.split( "\t" )
		name = re.sub("chr", "", fields[0])
		window = HTSeq.GenomicInterval( name, int(fields[1]), int(fields[2]), "." )
		positions.add(window)
	if controls == None:
		manager = Manager()
		return_dict = manager.dict()
		pool = Pool(8)
		if norm: #Normalisation provided
			pool.map(read_bam_function, itertools.izip(list(ibams.keys()), itertools.repeat(positions), itertools.repeat(halfwinwidth), 
				itertools.repeat(return_dict), itertools.repeat(norm), itertools.repeat(None))) ##Running annotation in parallel
		else:
			pool.map(read_bam_function, itertools.izip(list(ibams.keys()), itertools.repeat(positions), itertools.repeat(halfwinwidth), 
				itertools.repeat(return_dict), itertools.repeat(None), itertools.repeat(size_dict)))
		pool.close()
		pool.join()		
		for key in return_dict.keys():
			pyplot.plot( numpy.arange( -halfwinwidth, halfwinwidth ), return_dict[key], label=ibams[key])  
		pyplot.legend(prop={'size':8})
		pyplot.savefig("Average_peak_profile.pdf".format(ibams[key]))
	else:
		manager = Manager()
		return_dict = manager.dict()
		pool = Pool(8)
		pool.map(read_bam_function, itertools.izip(list(ibams.keys()), itertools.repeat(positions), itertools.repeat(halfwinwidth), 
				itertools.repeat(return_dict), itertools.repeat(None), itertools.repeat(None)))
		control_dict = manager.dict()
		control_bam = []
		for key in controls:
			control_bam.append(controls[key])
		control_sizes = sam_size(control_bam)
		pool = Pool(8)
		pool.map(read_bam_function, itertools.izip((control_bam), itertools.repeat(positions), itertools.repeat(halfwinwidth), 
			itertools.repeat(control_dict),  itertools.repeat(None),  itertools.repeat(None))) 
		pool.close()
		pool.join()	
	#	colors = ["b", "g", "r", "y", "k"] #To make it more robust, just use default colors
		c = 0
		for key in return_dict.keys():
			control = controls[key]
			new_profile = return_dict[key] - control_dict[control] #Unsure if working properly, maybe make it more intelligent?
			gapdh = read_counts(norm[key])
			constant = 1000/float(gapdh)
			new_profile = new_profile*constant
			pyplot.plot( numpy.arange( -halfwinwidth, halfwinwidth ), new_profile, label=ibams[key])#, color=colors[c])
		pyplot.legend(prop={'size':8})
		pyplot.savefig("Average_peak_profile.pdf".format(ibams[key]))
Example #14
    def run_parallel(
            self, test_suites, test_runner, result_type=None,
            results_path=None):

        exit_code = 0
        proc = None
        unittest.installHandler()
        processes = []
        manager = Manager()
        results = manager.dict()
        manager.dict()
        start = time.time()

        test_mapping = {}
        for test_suite in test_suites:
            # Give each test suite an uuid so it can be
            # matched to the correct test result
            test_id = str(uuid.uuid4())
            test_mapping[test_id] = test_suite

            proc = Process(
                target=self.execute_test,
                args=(test_runner, test_id, test_suite, results))
            processes.append(proc)
            proc.start()

        for proc in processes:
            proc.join()

        finish = time.time()

        errors, failures, _ = self.dump_results(start, finish, results)

        if result_type is not None:
            all_results = []
            for test_id, result in results.items():
                tests = test_mapping[test_id]
                result_parser = SummarizeResults(
                    vars(result), tests, (finish - start))
                all_results += result_parser.gather_results()

            reporter = Reporter(
                result_parser=result_parser, all_results=all_results)
            reporter.generate_report(
                result_type=result_type, path=results_path)

        if failures or errors:
            exit_code = 1

        return exit_code
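
A compact sketch of the bookkeeping above: one Process per work item, results keyed by a generated uuid in a Manager dict so they can be matched back to their inputs afterwards (the runner is faked):

import uuid
from multiprocessing import Process, Manager

def execute_test(test_id, suite, results):
    results[test_id] = suite.upper()          # stand-in for running the suite

if __name__ == "__main__":
    manager = Manager()
    results = manager.dict()
    mapping, procs = {}, []
    for suite in ["suite_a", "suite_b"]:
        test_id = str(uuid.uuid4())
        mapping[test_id] = suite
        p = Process(target=execute_test, args=(test_id, suite, results))
        procs.append(p)
        p.start()
    for p in procs:
        p.join()
    for test_id, outcome in results.items():
        print(mapping[test_id], outcome)
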
Example #15
File: test.py Project: originye/OVS
def controller_switch_failure_test():
    s = ["s5"]
    clear_config(s)
    manager1 = Manager()
    manager2 = Manager()

    Q1 = manager1.dict()
    failure1 = manager1.Value("i", 0)
    failed_list1 = manager1.list([])
    PID1 = manager1.list(["%02d" % i for i in xrange(1, 51)])
    s1 = manager1.list(["s5"])
    Q2 = manager2.dict()
    failure2 = manager2.Value("i", 0)
    failed_list2 = manager2.list([])
    PID2 = manager2.list(["%02d" % i for i in xrange(1, 51)])
    s2 = manager2.list(["s5"])
    processes = []
    process1 = mp.Process(target=policy_update, args=(s1, "1", Q1, PID1, failure1, failed_list1))
    processes.append(process1)
    process2 = mp.Process(target=controller_failure_detection, args=(s1, "1", failure1, failed_list1))
    processes.append(process2)
    process = mp.Process(target=upon_new_policy, args=(s1, "1", Q1, PID1))
    processes.append(process)
    process3 = mp.Process(target=policy_update, args=(s2, "2", Q2, PID2, failure2, failed_list2))
    processes.append(process3)
    process4 = mp.Process(target=controller_failure_detection, args=(s2, "2", failure2, failed_list2))
    processes.append(process4)
    process5 = mp.Process(target=upon_new_policy, args=(s2, "2", Q2, PID2))
    processes.append(process5)
    # Run processes
    for p in processes:
        p.start()
        print "STARTING:", p, p.is_alive()
    time.sleep(10)
    print "terminated"
    processes[3].terminate()
    processes[4].terminate()
    print "sleeping"
    time.sleep(5)
    processes[5].terminate()
    print "terminated 5"
    time.sleep(2)

    for p in processes:
        print "TERMINATED:", p, p.is_alive()

    for p in processes:
        p.join()
        print "JOINED:", p, p.is_alive()
Example #16
def main(data):
    """
    Launches the server with given set of keys (data)
    """
    #this library needs to be replaced
    ned = rsa.newKey(10**100,10**101,50)
    print "Keys\nn = %s\ne = %s\nd = %s\n" % (ned[0],ned[1],ned[2])

    #create thread safe dictionaries
    manager = Manager()
    ID_KEY = manager.dict()
    ID_KEY.update(data) #get saved data
    ID_STATUS = manager.dict()
    ID_SOCK = manager.dict()

    #define an interupt catcher to save data
    def signal_handler(signal, frame):
        print('Saving data')
        #this is ugly but the only reasonable way I could find
        #to unroll a managed threadsafe dictionary to a normal one
        #that can interact with pickle correctly
        with lock:
            with open('serverdata.pkl', 'wb') as f:
                temp = ast.literal_eval(str(ID_KEY))
                pickle.dump( temp, f ) #save the keys
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)

    #listen on port 8000 (probably a bad choice, usually used for testing web servers) for incoming connections
    s = socket.socket()
    hostname = socket.gethostname()
    port = 8000
    s.bind(('',port))
    s.listen(5)
    
    #listen for incoming connections and start a thread based on what kind it is
    while True:
        c, addr =  s.accept()
        c.send("init")
        v = c.recv(4)
        if "user" == v:
            #incomming connection is a user which will send queries
            Process(target=process_client, args=(c,ID_KEY,ID_STATUS,ID_SOCK, ned)).start()
        elif "circ" == v:
            #incoming connection is to be used as part of a circuit
            Process(target=process_circuit, args=(c,ID_KEY,ID_STATUS,ID_SOCK,ned)).start()
        elif "data" == v:
            #incoming connection has some data for the server to process
            Process(target=process_data, args=(c,ID_SOCK,ned)).start()
Example #17
    def search(self, links=False):
        """
        Get links from the search engines and fill them to the respective lists.
        It gets self.pages of links from Search Engines, sends them to the formatter functions and gets the lists.
        :return: nothing
        """
        if self.type == "text":
            mg = Manager()
            ret = mg.dict()
            jobs = []
            p1 = Process(target=self.google_proc, args=(ret,))
            jobs.append(p1)
            p2 = Process(target=self.yahoo_proc, args=(ret,))
            jobs.append(p2)
            p3 = Process(target=self.bing_proc, args=(ret,))
            jobs.append(p3)
            p1.start()
            p2.start()
            p3.start()

            for proc in jobs:
                proc.join()

            temp = ret.values()[0] + ret.values()[1] + ret.values()[2]
            print temp
            for i in temp:
                f = 0
                for j in self.uniquelinks:
                    if i[1] == j[1]:
                        f = 1
                if f == 0:
                    self.uniquelinks.append(i)
            if links:
                return self.uniquelinks
            else:  # [[title, link, data], [title, link, data] ...]
                mg = Manager()
                ret = mg.dict()
                jobs = []
                n = 0
                for li in self.uniquelinks[0:3]:
                    p = Process(target=self.data_collector, args=(n, li[1], ret))
                    n += 1
                    jobs.append(p)
                    p.start()

                for proc in jobs:
                    proc.join()
                print ret.values()
                print len(ret.values())
Example #18
def build_all_indexes(page_titles, wiki_index, process_limit=4):
    """
    This function runs link_indexer and token_indexer together in a single loop,
    instead of making one (costly!) pass for each using seperate functions.
    """
    manager = Manager()
    link_index = manager.dict()
    token_index = manager.dict()
    lock = manager.Lock()

    page_titles = [i.lower() for i in page_titles]

    run(page_titles, all_indexer_instance, (wiki_index, link_index, token_index, lock), process_limit=process_limit)

    return {'link_index': link_index, 'token_index': token_index}
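
The indexes above are filled concurrently under a manager.Lock(); a minimal sketch of why the lock is needed for read-modify-write updates on a shared dict (the tokenising logic is illustrative only):

from functools import partial
from multiprocessing import Pool, Manager

def count_tokens(index, lock, page):
    for token in page.split():
        with lock:                                   # protects the get-then-set
            index[token] = index.get(token, 0) + 1

if __name__ == "__main__":
    pages = ["a b a", "b c", "a c c"]
    with Manager() as m, Pool(2) as pool:
        index, lock = m.dict(), m.Lock()
        pool.map(partial(count_tokens, index, lock), pages)
        print(dict(index))
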
Example #19
def get_hls_stream(m3u8_url, concurrency=1, live=True, loop=1, segment_sleep=1, authentication=None, timeouts=None):
  # Spawn concurrent subprocesses to get every HLS segment of stream

  # Disable all SSL Warnings (version dependent)
  try:
    requests.packages.urllib3.disable_warnings()
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
  except:
    pass
  
  # Configurables
  subprocesses = []
  process_id = 0
  timeout    = {'read': float(timeouts['read']), 
                'connect': float(timeouts['connect']), 
                'sleep': float(segment_sleep)}
  manager    = Manager()
  lock       = manager.Lock()
  durations  = manager.list()
  success    = manager.dict()
  results    = manager.dict()
  status     = manager.dict()
  playlists  = manager.dict()

  # Cookies for session authentication
  if authentication:
    cookies = (authentication['url'], authentication['username'], authentication['password'], authentication['type'])
  else:
    cookies = None

  # Spawn parallel subprocesses for each simulated client
  for x in range(0, int(concurrency)):
    process_id += 1
    p = Process(target=get_playlist, args=(m3u8_url, live, loop, results, status, success, durations, playlists, timeout, cookies, lock, process_id,))
    subprocesses.append(p)
    p.start()

  # Wait for all processes to complete
  for subprocess in subprocesses:
    while True:
      response_times = calculated_response_times(durations)
      yield results._getvalue(), status._getvalue(), response_times, success._getvalue()
      
      time.sleep(1)
      if not subprocess.is_alive():
        yield results._getvalue(), status._getvalue(), response_times, success._getvalue()
        break
Example #20
 def scanner_network(self,gateway):
     scan = ''
     config_gateway = gateway.split('.')
     del config_gateway[-1]
     for i in config_gateway:
         scan += str(i) + '.'
     gateway = scan
     ranger = str(self.ip_range.text()).split('-')
     jobs = []
     manager = Manager()
     on_ips = manager.dict()
     for n in xrange(int(ranger[0]),int(ranger[1])):
         ip='%s{0}'.format(n)%(gateway)
         p = Process(target=self.working,args=(ip,on_ips))
         jobs.append(p)
         p.start()
     for i in jobs: i.join()
     for i in on_ips.values():
         Headers = []
         n = i.split('|')
         self.data['IPaddress'].append(n[0])
         self.data['MacAddress'].append(n[1])
         self.data['Hostname'].append('<unknown>')
         for n, key in enumerate(reversed(self.data.keys())):
             Headers.append(key)
             for m, item in enumerate(self.data[key]):
                 item = QTableWidgetItem(item)
                 item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                 self.tables.setItem(m, n, item)
     Headers = []
     for key in reversed(self.data.keys()):
         Headers.append(key)
     self.tables.setHorizontalHeaderLabels(Headers)
Example #21
def main():
    init_params()
    vk = connect_to_vk(LOGIN, PASSWORD)
    audio_list = vk.method('audio.get', {})

    total = len(audio_list)

    if not os.path.exists(DOWNLOAD_DIR):
        os.makedirs(DOWNLOAD_DIR)

    manager = Manager()
    workers_list = []
    progress_list = manager.dict()
    downloaded_tracks = manager.Value('i', 0)
    lock = Lock()

    for f in audio_list[:WORKERS_COUNT - 1]:
        start_download_process(f, workers_list, progress_list, downloaded_tracks, lock)

    del audio_list[:WORKERS_COUNT - 1]

    while any(worker.is_alive() for worker in workers_list) or len(audio_list):
        if audio_list and len(workers_list) < WORKERS_COUNT:
            f = audio_list.pop(0)
            start_download_process(f, workers_list, progress_list, downloaded_tracks, lock)
        print_progress(progress_list, downloaded_tracks.value, total)
        clean_workers(workers_list)
        time.sleep(0.1)
    print "Done!"
Example #22
def getData():
    if os.path.isfile("chat_urls.p"):
        chat_urls = pickle.load( open( "chat_urls.p", "rb" ) )
    else:
        chat_urls = {}
        for user in users:
            chat_urls[user] = get_urls(user)
        teams_url = "http://espn.go.com/mlb/teams"
        pickle.dump( chat_urls, open( "chat_urls.p", "wb" ) )

    # for user in chat_urls:
    #     urls = chat_urls[user]
    #     for url in urls:
    #         getLog(url)
    logDB = {}
    for user in chat_urls:
        logDB[user] = {}
    p = Pool(20)
    i=0
    manager = Manager()
    db = manager.dict()
    for user in chat_urls:
        for url in chat_urls[user]:
            i+=1
            p.apply_async(addLogData, args=(url,db))
    p.close()
    p.join()
    out = db._getvalue()
    outfile = open("rawChat.txt","wb")
    for url in out:
        outfile.write(out[url]+"\n")
Example #23
    def __init__(self, firefox=None, email=None, senha=None, pasta=None):
        """'firefox' é o caminho para o binário do Firefox a ser usado.
        'pasta' é o caminho para a pasta onde salvar os downloads."""
        self.firefox = firefox
        self.pasta = pasta
        self.email = email
        self.senha = senha

        self.navegador = None
        self.app = None
        self.logger = None

        manager = Manager()
        self.safe_dict = manager.dict()
        self.clear_captcha()
        self.stop()

        self.try_break_audio_captcha = True
        self.nome_audio_captcha = "somCaptcha.wav"
        self.recognizer = sr.Recognizer(str('pt-BR'))

        self.user_agent = (
            "User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:28.0)"
            " Gecko/20100101  Firefox/28.0"
        )
        self.base_url = 'http://esic.prefeitura.sp.gov.br'
        self.login_url = self.base_url + '/Account/Login.aspx'

        self.logado = False
        self.ja_tentou_cookies_salvos = False
        self.rodar_apenas_uma_vez = False
def concurrent_test(robot, rooms, num_trials, start_location = -1, chromosome = None):
    """
    Run the tests in multiple processes. Can be directly swapped out for testAllMaps.
    """
    # Setup variables
    num_rooms    = len(rooms)               # Total number of rooms
    total_trials = num_trials * num_rooms   # Total number of trials
    processes    = []                       # List for all processes
    manager      = Manager()                # Manager to handle result transfer
    dict         = manager.dict()           # Dict which will store results
    
    # Create a process for each room, storing parameters in instance variables
    for i, room in enumerate(rooms):
        process = SimulationProcess(i, dict)
        process.robot          = robot
        process.room           = room
        process.num_trials     = num_trials
        process.start_location = start_location
        process.chromosome     = chromosome
        process.start()
        processes.append(process)
    #end for

    # Print the results
    total_score = 0
    for i, process in enumerate(processes):
        process.join()
        (score, std) = dict[i]
        print("Room %d of %d done (score: %d std: %d)" % (i + 1, num_rooms, score, std))
        total_score += score
    #end for
    
    print("Average score over %d trials: %d" % (total_trials, total_score / num_rooms))
    return total_score / num_rooms
#end concurrent_test
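
A bare-bones sketch of the Process subclass pattern used above: the constructor keeps an index and the shared dict, and run() stores a (score, std) pair under that index (all values below are fabricated placeholders):

from multiprocessing import Process, Manager

class SimulationSketch(Process):
    def __init__(self, index, results):
        super(SimulationSketch, self).__init__()
        self.index = index
        self.results = results

    def run(self):
        self.results[self.index] = (self.index * 10, 1)   # (score, std) stand-in

if __name__ == "__main__":
    manager = Manager()
    results = manager.dict()
    procs = [SimulationSketch(i, results) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(dict(results))
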
Example #25
def run(args):
    # Limit it to a single GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    conn = create_db(args.db)
    m = Manager()

    logs = args.logging
    datasets = args.datasets
    embeddings = args.embeddings
    settings = args.settings

    # So we don't litter the fs
    dir_ = tempfile.mkdtemp(prefix='baseline-speed-test-')

    try:
        configs = get_configs(args.config)
        if not args.single:
            full_configs = []
            for config in configs:
                full_configs.extend(edit_config(config, args.frameworks, args.no_crf, args.no_attn))
            configs = full_configs
        if args.verbose:
            for config in configs:
                pprint(config)
                print()
            print()
        steps = len(configs)
        pg = create_progress_bar(steps)
        for config in configs:
            write_config = deepcopy(config)
            config['train']['epochs'] = args.trials
            task_name = config['task']

            system_info = m.dict()
            p = Process(
                target=run_model,
                args=(
                    system_info,
                    config,
                    logs,
                    settings,
                    datasets,
                    embeddings,
                    task_name,
                    dir_,
                    int(args.gpu)
                )
            )
            p.start()
            pid = p.pid
            p.join()
            log_file = os.path.join(dir_, 'timing-{}.log'.format(pid))
            speeds = parse_logs(log_file)

            save_data(conn, speeds, write_config, system_info)
            pg.update()
        pg.done()
    finally:
        shutil.rmtree(dir_)
Example #26
def func_thread():
    a = numpy.random.rand(1000000)
    b = numpy.random.rand(1000000)

    nodata = 0.3

    print "here"
    manager = Manager()
    lock = Lock()
    d = manager.dict()
    ps = []
    start_time = time.clock()
    for i in numpy.where((a > 0.7) & (a < 0.9) & (a != nodata)):
        for j in numpy.where((b > 0.5) & (b < 0.9) & (b != nodata)):

            index = numpy.intersect1d(i, j)
            length = len(index)/2
            array1 = index[:length]
            array2 = index[length:]
            for processes in range(2):
                p = Process(target=f_thread, args=(d, a, b, array1, lock))
                ps.append(p)
                p.start()

            for p in ps:
                p.join()

    print time.clock() - start_time, "seconds"
    print len(d)
Example #27
    def __init__(self,port):
        manager = Manager()
        self.status=manager.dict()
        self.sendbuf=manager.list()
        self.p = Process(target=SocketManager, args=(port,self.status,self.sendbuf) )
        self.p.daemon = True
        self.p.start()
Example #28
def sync():
    from multiprocessing import Manager
    from common import bounty, settings, peers
    from common.safeprint import safeprint
    man = Manager()
    items = {'config':man.dict(),
             'peerList':man.list(),
             'bountyList':man.list(),
             'bountyLock':bounty.bountyLock,
             'keyList':man.list()}
    items['config'].update(settings.config)
    items['peerList'].extend(peers.peerlist)
    items['bountyList'].extend(bounty.bountyList)
    safeprint(items)
    safeprint(items.get('bountyList'))
    safeprint(items.get('keyList'))
    if items.get('config') is not None:
        from common import settings
        settings.config = items.get('config')
    if items.get('peerList') is not None:
        global peerList
        peers.peerlist = items.get('peerList')
    if items.get('bountyList') is not None:
        from common import bounty
        bounty.bountyList = items.get('bountyList')
    if items.get('bountyLock') is not None:
        from common import bounty
        bounty.bountyLock = items.get('bountyLock')
    return items
Example #29
def main():
    if len(sys.argv) > 1:
        print "cmd arg to set directory to: " + sys.argv[1]
        os.chdir(sys.argv[1])

    print "cwd is: " + os.getcwd()

    # make sure we have the correct device

    keepTrying = True
    countCurrent = 0
    countCurrentFail = 0

    manager = Manager()
    sharedDictionary = manager.dict()

    while keepTrying:
        serial0 = serial.Serial("/dev/ttyACM0")  # connection to arduino1
        serial1 = serial.Serial("/dev/ttyACM1")  # connection to arduino2

        try:
            line = serial0.readline()  # read arduino about once every two seconds
            I = float(line.split(" ")[1].strip())  # get the current reading
            countCurrent += 1
        except Exception, e:
            countCurrentFail += 1

        if countCurrent > countCurrentFail + 5:  # 5 good readings
            keepTrying = False
            startThreading(sharedDictionary, serial0, serial1)
        elif countCurrentFail > countCurrent + 5:  # 5 bad readings, do a swap
            keepTrying = False
            startThreading(sharedDictionary, serial1, serial0)

        print " . " + str(countCurrent) + "-" + str(countCurrentFail)
def processFiles(patch_dir):
    root = os.getcwd()
    glbl.data_dirs = {}
    if root != patch_dir: working_path = root+"/"+patch_dir
    else: working_path = root

    for path, dirs, files in os.walk(working_path):
        if len(dirs) == 0: glbl.data_dirs[path] = ''
    

    # Multiprocessing Section
    #########################################
    Qids = glbl.data_dirs.keys()
    manager = Manager()                                      # creates shared memory manager object
    results = manager.dict()                                 # Add dictionary to manager, so it can be accessed across processes
    nextid = Queue()                                         # Create Queue object to serve as shared id generator across processes
    for qid in Qids: nextid.put(qid)                         # Load the ids to be tested into the Queue
    for x in range(0,multiprocessing.cpu_count()):           # Create one process per logical CPU
        p = Process(target=processData, args=(nextid,results)) # Assign process to processCBR function, passing in the Queue and shared dictionary
        glbl.jobs.append(p)                                   # Add the process to a list of running processes
        p.start()                                             # Start process running
    for j in glbl.jobs:
        j.join()                                              # For each process, join them back to main, blocking on each one until finished
    
    # write out results
    c = 1
    sets = results.keys()
    sets.sort()
    for x in sets:
        if results[x] != 'None':
            FINAL = open('result'+str(c)+'.txt','w')
            n = "\n************************************************************************************************\n"
            FINAL.write(n+"* "+x+'    *\n'+n+results[x]+"\n")
            FINAL.close()     
            c += 1
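
A trimmed sketch of the Queue-as-work-feed idea above: ids are preloaded into a Queue, one worker per CPU drains it and writes into a Manager dict. This version uses one sentinel per worker instead of relying on the queue looking empty; names and the "analysis" are placeholders:

import multiprocessing
from multiprocessing import Process, Manager, Queue

def process_data(nextid, results):
    while True:
        qid = nextid.get()
        if qid is None:                    # sentinel: no more work
            return
        results[qid] = len(qid)            # placeholder analysis

if __name__ == "__main__":
    manager = Manager()
    results = manager.dict()
    nextid = Queue()
    for qid in ["dir_a", "dir_b", "dir_c", "dir_d"]:
        nextid.put(qid)
    n_workers = multiprocessing.cpu_count()
    for _ in range(n_workers):
        nextid.put(None)                   # one sentinel per worker
    jobs = [Process(target=process_data, args=(nextid, results))
            for _ in range(n_workers)]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    print(dict(results))
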
Example #31
class Airscan():

    _instance = None

    def __init__(self):
        self.manager = Manager()
        self.aps = self.manager.list()
        self.asoc = self.manager.list()
        self.nasoc = self.manager.list()
        self.flags = self.manager.dict()

    def packet_handler(self, pkt):
        if pkt.haslayer(Dot11):
            Dot11Layer = pkt.getlayer(Dot11)
            if Dot11Layer.addr1 == Dot11Layer.addr3 and Dot11Layer.addr1 != 'ff:ff:ff:ff:ff:ff':
                client = "%s %s" % (Dot11Layer.addr1, Dot11Layer.addr2)
                if client not in self.asoc:
                    self.asoc.append(client)
                    print "%s%s %s%s" % (RED, Dot11Layer.addr1,
                                         Dot11Layer.addr2, END)

            if pkt.haslayer(Dot11ProbeReq):
                Dot11ProbeReqLayer = pkt.getlayer(Dot11ProbeReq)
                if len(Dot11ProbeReqLayer.info) > 0:
                    client = "%s %s" % (Dot11Layer.addr2,
                                        Dot11ProbeReqLayer.info)
                    if client not in self.nasoc:
                        self.nasoc.append(client)
                        print "%s%s %s%s" % (YEL, Dot11Layer.addr2,
                                             Dot11ProbeReqLayer.info, END)

            if pkt.haslayer(Dot11Beacon):
                Dot11BeaconLayer = pkt.getlayer(Dot11Beacon)
                if Dot11Layer.addr2 and (Dot11Layer.addr2 not in self.aps):
                    self.aps.append(Dot11Layer.addr2)
                    channel = int(ord(pkt[Dot11Elt:3].info))
                    cap = pkt.sprintf(
                        "{Dot11Beacon:%Dot11Beacon.cap%}\{Dot11ProbeResp:%Dot11ProbeResp.cap%}"
                    )
                    if re.search("privacy", cap):
                        enc = 'Y'
                    else:
                        enc = 'N'

                    rssi = -(256 - ord(pkt.notdecoded[-4:-3]))
                    print "%s%d %s %d %s %s%s" % (BLU, channel, enc, rssi,
                                                  Dot11Layer.addr2,
                                                  Dot11BeaconLayer.info, END)

    def keep_handler(self, pkt):
        return self.flags['stop_sniff']

    def channel_hop(self):
        for channel in range(1, 14):
            os.system("iw dev %s set channel %d" % (self.interface, channel))
            time.sleep(0.7)
        self.flags['stop_sniff'] = True
        self.aps[:] = []
        self.asoc[:] = []
        self.nasoc[:] = []

    def signal_handler(self, signal, frame):
        self.flags['stop_sniff'] = True
        self.p.terminate()
        self.p.join()

    def run(self, iface):
        self.interface = iface

        self.flags['stop_sniff'] = False
        self.p = Process(target=self.channel_hop)
        self.p.start()
        signal.signal(signal.SIGINT, self.signal_handler)
        sniff(iface=self.interface,
              prn=self.packet_handler,
              stop_filter=self.keep_handler)
        self.p.terminate()
        self.p.join()

        target = raw_input("Enter target bssid fully or partially:")

        airjam = Airjam(self.interface, target)
        airjam.run()

    def getInstance():
        if Airscan._instance == None:
            Airscan._instance = Airscan()

        return Airscan._instance

    getInstance = staticmethod(getInstance)
from multiprocessing import Manager

def set_exit_handler(func):
    signal.signal(signal.SIGTERM, func)
    
def on_exit(sig, func=None):
    print("exit handler triggered")
    sys.exit(1)
    
if __name__ == '__main__':
    set_exit_handler(on_exit)
    options = None
    # grab options, setup
    envArgs = os.getenv("ARELLE_ARGS")
    manager = Manager()
    output = manager.dict()
    
    if envArgs:
        args = shlex.split(envArgs)
    else:
        args = sys.argv[1:]
    try:
        numthreads = int(args[args.index('--xule-numthreads')+1])
    except ValueError:
        numthreads = 1
    gettext.install("arelle") # needed for options messages

    print("Initializing Server")
    cntlr = CntlrCmdLine.parseAndRun(args)
    cntlr.startLogging(logFileName='logToBuffer')
    # get generated options from controller
from multiprocessing import Manager
from threading import Lock
from typing import Any, Iterable, Optional

manager = Manager()
shared_memory: dict[str, Any] = manager.dict()
lock: Lock = manager.Lock()


class Db:
    _cache: dict[str, Any]
    _lock: Lock

    def __init__(self) -> None:
        self._cache = shared_memory
        self._lock = lock

    @property
    def cache(self) -> dict[str, Any]:
        return self._cache

    def connect(self) -> dict[str, Any]:
        return self.cache

    def close(self) -> None:
        self.clear()

    def get(self, key: str) -> Optional[Any]:
        return self.cache.get(key, None)

    def all(self) -> Iterable[Any]:
Example #34
    
    y_predict=cross_val_predict(svm.SVC(kernel='rbf',C=C,gamma=gamma,),X_train,y_train,cv=cross_validation_value,n_jobs=CPU_value)
    y_predict_prob=cross_val_predict(svm.SVC(kernel='rbf',C=C,gamma=gamma,probability=True),X_train,y_train,cv=cross_validation_value,n_jobs=CPU_value,method='predict_proba')
    input_file = input_file.replace(".csv","")
    y_predict_path = input_file + "_predict.csv"
    y_predict_proba_path = input_file + "_predict_proba.csv"
    share_y_predict_dict[input_file] = y_predict
    share_y_predict_proba_dict[input_file] = y_predict_prob[:,1]
    pd.DataFrame(y_predict).to_csv(y_predict_path, header = None, index = False)
    pd.DataFrame(y_predict_prob[:,1]).to_csv(y_predict_proba_path, header = None, index = False)
    print("子进程终止>>> pid={0}".format(os.getpid()))
        
if __name__=="__main__":
    print("主进程执行中>>> pid={0}".format(os.getpid()))
    manager = Manager()
    share_y_predict_dict = manager.dict()
    share_y_predict_proba_dict = manager.dict()
    ps=[]
    if default_l == 1:
        data = ""
        x_len = 1000
        y_len = 1000
        file_len = len(input_files)
        threshold = file_len/2
        for index, input_file in enumerate(input_files):
            data = pd.read_csv(input_file,header=None)
            (x_len,y_len) = data.shape

            X_train = data.iloc[:,0:y_len-1]
            y_train = data.iloc[:,[y_len-1]]
            X_train = X_train.values
Example #35
from playback import *
from volume import *

def setup_pins():
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)

def reset_recordings(channel):
    global recordings
    metronome(recordings)

if __name__ == "__main__":
    manager = Manager()
    
    # Initialize shared variables for recordings and volumes
    recordings = manager.dict()
    volumes = manager.dict()

    # Metronome initializes the recordings with a metronome on channel 7
    metronome_proc = Process(target=metronome, args=(recordings,))
    metronome_proc.start()
    metronome_proc.join()
    
    # Sets volumes to 127 initially
    setVolumes = Process(target=set_volumes, args=(volumes,))
    setVolumes.start()
    setVolumes.join()

    setup_pins()
    GPIO.add_event_detect(17, GPIO.FALLING, callback=reset_recordings, bouncetime=300)
Example #36
def convert_html_to_pdf(template_file, pdf_name, dirName, dirName_Output):
    pdfkit.from_file(os.path.join(os.path.join(dirName, template_file)),
                     os.path.join(os.path.join(dirName_supp, pdf_name)),
                     options=options_supp)


############################################################################################################################
# Run script
#################################################

if __name__ == "__main__":
    utility.clean_all()
    createdirs(dirNames)
    manager = Manager()  # create only 1 mgr
    d = manager.dict()  # create only 1 dict
    report = WriteReport(args.f)
    template_dict = report.run_entry_composition(Template_Dict)
    template_dict, clashscore, rama, sidechain, exv_data = report.run_model_quality(
        template_dict)
    template_dict, sas_data, sas_fit = report.run_sas_validation(template_dict)
    cx_fit, template_dict = report.run_cx_validation(template_dict)
    report.run_quality_glance(clashscore, rama, sidechain, exv_data, sas_data,
                              sas_fit, cx_fit)
    report.run_sas_validation_plots(template_dict)
    write_pdf(args.f, template_dict, template_pdf, dirNames['pdf'],
              dirNames['pdf'])
    template_dict = report.run_supplementary_table(template_dict,
                                                   location=args.ls,
                                                   physics=physics,
                                                   method_details=args.m,
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from multiprocessing import Process
from multiprocessing import Manager

import time


def f1(i, dic):
    dic[i] = 200 + i
    time.sleep(i)
    print(dic.values())


if __name__ == '__main__':  # processes do not share memory by default
    manager = Manager()
    dic = manager.dict()  # a special dict shared through the manager process

    for i in range(10):
        p = Process(target=f1, args=(i, dic))
        p.start()
        p.join()
Example #38

def add_routes(app):
    app.router.add_static('/static', path=PROJECT_ROOT)
    app.router.add_get('/remote', store_handlers.get_remote_manifest)
    app.router.add_get('/local', store_handlers.get_local_manifest)
    app.router.add_get('/modules/{module}/{version}/readme',
                       store_handlers.get_module_readme)
    app.router.add_post('/install', install_module)
    app.router.add_post('/uninstall', store_handlers.uninstall_module)
    app.router.add_get('/installstream', get_install_stream)


if __name__ == '__main__':
    manager = Manager()
    INSTALL_STATE = manager.dict()
    SSE_UPDATE_CONDITION = manager.Condition()
    SSE_UPDATE_EVENT = manager.Event()
    INSTALL_STATE['stage'] = ''
    INSTALL_STATE['message'] = ''
    INSTALL_STATE['module_name'] = ''
    INSTALL_STATE['module_version'] = ''
    INSTALL_STATE['cur_chunk'] = 0
    INSTALL_STATE['total_chunks'] = 0
    INSTALL_STATE['cur_size'] = 0
    INSTALL_STATE['total_size'] = 0
    INSTALL_STATE['update_time'] = time.time()
    install_worker = Process(target=install_from_queue,
                             args=(INSTALL_QUEUE, INSTALL_STATE,
                                   SSE_UPDATE_EVENT))
    install_worker.start()
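
A small sketch of signalling progress between processes with a Manager Event and a shared state dict, loosely mirroring the INSTALL_STATE / SSE_UPDATE_EVENT pair above (the installer is simulated and every name is illustrative):

import time
from multiprocessing import Process, Manager

def installer(state, updated):
    for pct in (25, 50, 75, 100):
        state["progress"] = pct
        updated.set()                      # tell watchers something changed
        time.sleep(0.05)

if __name__ == "__main__":
    manager = Manager()
    state = manager.dict(progress=0)
    updated = manager.Event()
    worker = Process(target=installer, args=(state, updated))
    worker.start()
    while worker.is_alive():
        if updated.wait(timeout=0.2):
            updated.clear()
            print("progress:", state["progress"])
    worker.join()
    print("final progress:", state["progress"])
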
Example #39
def main():

    args = parse_arguments()

    manager = Manager()
    performance_data = manager.dict()

    with open(args.test_config) as test_config:
        test_conf = json.loads(test_config.read())

    _, file_name = ntpath.split(args.test_config)
    measure_id, _ = os.path.splitext(file_name)

    log.info("Starting performance tests for: " + measure_id)

    # block processes before measuring queries
    # wait for all processes to store generated results
    store_done = multiprocessing.Event()

    wait = test_conf.get("wait_for_store", False)
    if wait:
        store_done.set()

    proc_done_counter = Value('i', args.job_num)
    proc_counter_lock = Lock()

    procs = [
        Process(target=measure,
                args=(test_conf, performance_data, store_done,
                      proc_done_counter, proc_counter_lock, args.keep))
        for i in range(args.job_num)
    ]

    for proc in procs:
        proc.start()

    log.info("Measuring report storage time.")
    if wait:
        while proc_done_counter.value != 0:
            time.sleep(1)
            sys.stdout.write('.')
            sys.stdout.flush()

        sys.stdout.write('\n')

    log.info("Measuring queries ...")
    store_done.set()

    for proc in procs:
        proc.join()

    log.info("Measuring queries done.")

    if not test_conf.get("clean_after_fill", True) and not args.keep:

        view_host, view_port = get_viewer_host_port(test_conf)

        del_procs = [
            Process(target=delete_results,
                    args=(view_host, view_port, run_id, performance_data))
            for run_id in performance_data.keys()
        ]

        for proc in del_procs:
            proc.start()

        for proc in del_procs:
            proc.join()

    if not len(performance_data):
        log.info("There are no measurements results")
        sys.exit(0)

    file_name = measure_id + '.csv'

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    output = os.path.join(args.output, file_name)
    with open(output, 'w+') as out:
        process_perf_data(performance_data, out)
    log.info('Measurement results:\n' + str(output))
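
The measure worker itself is not included in this snippet. A sketch of a function matching the arguments passed above: it decrements the shared counter under the lock once its reports are stored, blocks on the store_done event, and finally publishes its timings into the Manager dict (the key and the timing details are placeholders):

import os
import time


def measure(test_conf, performance_data, store_done,
            proc_done_counter, proc_counter_lock, keep):
    # Hypothetical sketch, not the real worker.
    started = time.time()
    # ... store the generated reports here ...

    with proc_counter_lock:          # tell the parent this worker finished storing
        proc_done_counter.value -= 1

    store_done.wait()                # wait until the parent enables query measuring

    # ... run and time the queries here ...
    performance_data[os.getpid()] = {'store_time': time.time() - started}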
Example #40
0
    def download(self, project, directory_path, is_forced):
        process_results = []
        success_data_count = 0
        success_labels_count = 0
        data_error_results = {}
        label_error_results = {}
        # get data count
        main_label_count, labels = self.label_manager.get_labels(
            project.id, label_type='DEFAULT', page=1, page_size=1)
        # get labels count
        total_label_count, labels = self.label_manager.get_labels(project.id,
                                                                  page=1,
                                                                  page_size=1)

        #Download project configuration
        try:
            project_config_path = os.path.join(directory_path, 'project.json')
            with open(project_config_path, 'w') as config_file:
                json.dump(project.label_interface, config_file, indent=4)
            is_download_project_config = True
        except Exception as e:
            is_download_project_config = False

        if main_label_count != 0:
            page_length = int(
                main_label_count / LABEL_DESCRIBE_PAGE_SIZE
            ) if main_label_count % LABEL_DESCRIBE_PAGE_SIZE == 0 else int(
                main_label_count / LABEL_DESCRIBE_PAGE_SIZE) + 1
            if not is_forced:
                if not click.confirm(
                        f"Downloading {main_label_count} data and {total_label_count} labels from project '{project.name}' to '{directory_path}'. Proceed?"
                ):
                    return
            manager = Manager()
            label_results_dict = manager.dict()
            label_results = manager.list([label_results_dict] * page_length)
            process_results = manager.list(range(page_length))
            with Pool(NUM_MULTI_PROCESS) as p:
                list(
                    tqdm.tqdm(p.imap(
                        _download_worker,
                        zip([self.label_manager] * page_length,
                            [project.id] * page_length, range(page_length),
                            [directory_path] * page_length, label_results,
                            [process_results] * page_length)),
                              total=page_length))

            for key in label_results_dict.keys():
                results_error = label_results_dict[key]['error']
                success_data_count += 1
                success_labels_count += label_results_dict[key][
                    'success_labels_count']
                if 'data' in results_error:
                    data_error_results[key] = results_error['data']
                if 'label' in results_error:
                    label_error_results[key] = results_error['label']

        console.print('\n[b blue]** Result Summary **[/b blue]')
        console.print(
            f'Download of project configuration - {"[b blue]Success[/b blue]" if is_download_project_config else "[b red]Fail[/b red]"}'
        )
        console.print(
            f'Successful download of {success_labels_count} out of {total_label_count} labels. ({round(success_labels_count/total_label_count*100,2)}%) - [b red]{total_label_count - success_labels_count} ERRORS[/b red]'
        )
        console.print(
            f'Successful download of {success_data_count} out of {main_label_count} data. ({round(success_data_count/main_label_count*100,2)}%) - [b red]{main_label_count - success_data_count} ERRORS[/b red]'
        )

        # TODO: Need to refactor Get Labels API Fail Logic
        is_process_fail = False
        for process_result in process_results:
            if not process_result:
                is_process_fail = True
        if is_process_fail:
            console.print(
                f'[b red]Failed Download Labels from API. Please retry download.[/b red]'
            )

        self._print_error_table(
            data_error_results=data_error_results,
            label_error_results=label_error_results,
        )
Example #41
0
    def upload_data(self, project, dataset_name, directory_path, include_label,
                    is_forced):
        imgs_path = recursive_glob_image_files(directory_path)
        if not is_forced:
            if not click.confirm(
                    f"Uploading {len(imgs_path)} data and {len(recursive_glob_label_files(directory_path)) if include_label else 0 } labels to dataset '{dataset_name}' under project '{project.name}'. Proceed?"
            ):
                return
        asset_images = []
        manager = Manager()
        if len(imgs_path) != 0:
            for key in imgs_path:
                file_name = key
                asset_image = {
                    'file': imgs_path[key],
                    'file_name': file_name,
                    'data_key': key,
                    'dataset': dataset_name
                }
                asset_images.append(asset_image)
            data_results = manager.list([manager.dict()] * len(asset_images))
            console.print(f"Uploading data:")
            with Pool(NUM_MULTI_PROCESS) as p:
                list(
                    tqdm.tqdm(p.imap(
                        _upload_asset,
                        zip([project.id] * len(asset_images), asset_images,
                            data_results)),
                              total=len(asset_images)))
        else:
            data_results = [{}]

        label_results = None
        if include_label:
            labels_path = recursive_glob_label_files(directory_path)
            console.print(f"Uploading labels:")
            if len(labels_path) != 0:
                label_results = manager.list([manager.dict()] *
                                             len(labels_path))
                with Pool(NUM_MULTI_PROCESS) as p:
                    list(
                        tqdm.tqdm(p.imap(
                            _update_label,
                            zip([self.label_manager] * len(labels_path),
                                labels_path, [project.id] * len(labels_path),
                                [project.label_interface] * len(labels_path),
                                [dataset_name] * len(labels_path),
                                label_results)),
                                  total=len(labels_path)))
            else:
                label_results = [{}]

        console.print('\n[b blue]** Result Summary **[/b blue]')
        success_data_count = len(asset_images) - len(data_results[0])
        data_success_ratio = round(
            success_data_count / len(asset_images) *
            100, 2) if len(data_results[0]) != 0 else 100
        console.print(
            f'Successful upload of {success_data_count} out of {len(asset_images)} data. ({data_success_ratio}%) - [b red]{len(data_results[0])} ERRORS[/b red]'
        )

        if include_label:
            success_label_count = len(labels_path) - len(label_results[0])
            label_success_ratio = round(
                success_label_count / len(labels_path) *
                100, 2) if len(label_results[0]) != 0 else 100
            console.print(
                f'Successful upload of {success_label_count} out of {len(labels_path)} labels. ({label_success_ratio}%) - [b red]{len(label_results[0])} ERRORS[/b red]'
            )
            self._print_error_table(data_error_results=dict(data_results[0]),
                                    label_error_results=dict(label_results[0]))
        else:
            self._print_error_table(data_error_results=dict(data_results[0]))
    def generatePathwaysList(self):
        """
        This function takes the list of selected compounds and the list of matched genes and
        finds all the pathways that contain at least one of these features.

        @param {type}
        @returns
        """
        from multiprocessing import Process, cpu_count, Manager
        from math import ceil


        #****************************************************************
        # Step 1. GET THE KEGG DATA AND PREPARE VARIABLES
        #****************************************************************
        inputGenes = self.getInputGenesData().values()
        inputCompounds = self.getInputCompoundsData().values()
        pathwayIDsList = KeggInformationManager().getAllPathwaysByOrganism(self.getOrganism())

        #GET THE IDS FOR ALL PATHWAYS FOR CURRENT SPECIE
        enrichmentByOmic = {x.get("omicName"): x.get("featureEnrichment", False) for x in self.getGeneBasedInputOmics() + self.getCompoundBasedInputOmics()}

        totalFeaturesByOmic, totalRelevantFeaturesByOmic = self.calculateTotalFeaturesByOmic(enrichmentByOmic)
        totalInputMatchedCompounds = len(self.getInputCompoundsData())
        totalInputMatchedGenes = len(self.getInputGenesData())
        totalKeggPathways = len(pathwayIDsList)

        mappedRatiosByOmic = self.getMappedRatios()

        #****************************************************************
        # Step 2. FOR EACH PATHWAY OF THE SPECIE, CHECK IF THERE IS ONE OR
        #         MORE FEATURES FROM THE INPUT (USING MULTITHREADING)
        #****************************************************************
        # try:
        #     #CALCULATE NUMBER OF THREADS
        #     nThreads = min(cpu_count(), MAX_THREADS)
        # except NotImplementedError as ex:
        #     nThreads = MAX_THREADS
        nThreads = MAX_THREADS
        logging.info("USING " + str(nThreads) + " THREADS")

        def matchPathways(jobInstance, pathwaysList, inputGenes, inputCompounds, totalFeaturesByOmic, totalRelevantFeaturesByOmic, matchedPathways, mappedRatiosByOmic, enrichmentByOmic):
            #****************************************************************
            # Step 2.1. FOR EACH PATHWAY IN THE LIST, GET ALL FEATURE IDS
            #           AND CALCULATE THE SIGNIFICANCE FOR THE PATHWAY
            #****************************************************************
            keggInformationManager = KeggInformationManager()

            genesInPathway = compoundsInPathway = pathway = None
            for pathwayID in pathwaysList:
                genesInPathway, compoundsInPathway = keggInformationManager.getAllFeatureIDsByPathwayID(jobInstance.getOrganism(), pathwayID)
                isValidPathway, pathway = self.testPathwaySignificance(genesInPathway, compoundsInPathway, inputGenes, inputCompounds, totalFeaturesByOmic, totalRelevantFeaturesByOmic, mappedRatiosByOmic, enrichmentByOmic)
                if(isValidPathway):
                    pathway.setID(pathwayID)
                    pathway.setName(keggInformationManager.getPathwayNameByID(jobInstance.getOrganism(), pathwayID))
                    pathway.setClassification(keggInformationManager.getPathwayClassificationByID(jobInstance.getOrganism(), pathwayID))
                    pathway.setSource(keggInformationManager.getPathwaySourceByID(jobInstance.getOrganism(), pathwayID))

                    matchedPathways[pathwayID] = pathway

        manager=Manager()
        matchedPathways=manager.dict() #WILL STORE THE OUTPUT FROM THE THREADS
        nPathwaysPerThread = int(ceil(len(pathwayIDsList)/nThreads)) + 1  #GET THE NUMBER OF PATHWAYS TO BE PROCESSED PER THREAD
        pathwaysListParts = chunks(pathwayIDsList, nPathwaysPerThread) #SPLIT THE ARRAY IN n PARTS
        threadsList = []
        #LAUNCH THE THREADS
        for pathwayIDsList in pathwaysListParts:
            thread = Process(target=matchPathways, args=(self, pathwayIDsList, inputGenes, inputCompounds, totalFeaturesByOmic, totalRelevantFeaturesByOmic, matchedPathways, mappedRatiosByOmic, enrichmentByOmic))
            threadsList.append(thread)
            thread.start()

        #WAIT UNTIL ALL THREADS FINISH
        for thread in threadsList:
            thread.join(MAX_WAIT_THREADS)

        isFinished = True
        for thread in threadsList:
            if(thread.is_alive()):
                isFinished = False
                thread.terminate()
                logging.info("THREAD TERMINATED IN generatePathwaysList")


        if not isFinished:
            raise Exception('Your data took too long to process and it was killed. Try it again later or upload smaller files if it persists.')

        self.setMatchedPathways(dict(matchedPathways))
        totalMatchedKeggPathways=len(self.getMatchedPathways())

        # Get the adjusted p-values (they need to be passed as a whole)
        pvalues_list = defaultdict(dict)
        combined_pvalues_list = defaultdict(dict)

        for pathway_id, pathway in self.getMatchedPathways().iteritems():
            for omic, pvalue in pathway.getSignificanceValues().iteritems():
                pvalues_list[omic][pathway_id] = pvalue[2]

            for method, combined_pvalue in pathway.getCombinedSignificancePvalues().iteritems():
                combined_pvalues_list[method][pathway_id] = combined_pvalue

        adjusted_pvalues = {omic: adjustPvalues(omicPvalues) for omic, omicPvalues in pvalues_list.iteritems()}
        adjusted_combined_pvalues = {method: adjustPvalues(methodCombinedPvalues) for method, methodCombinedPvalues in combined_pvalues_list.iteritems()}

        # Set the adjusted p-value on a pathway basis
        for pathway_id, pathway in self.getMatchedPathways().iteritems():
            for omic, pvalue in pathway.getSignificanceValues().iteritems():
                pathway.setOmicAdjustedSignificanceValues(omic, {adjust_method: pvalues[pathway_id] for adjust_method, pvalues in adjusted_pvalues[omic].iteritems()})

            for method, combined_pvalue in pathway.getCombinedSignificancePvalues().iteritems():
                 pathway.setMethodAdjustedCombinedSignificanceValues(method, {adjust_method: combined_pvalues[pathway_id] for adjust_method, combined_pvalues in adjusted_combined_pvalues[method].iteritems()})

        logging.info("SUMMARY: " + str(totalMatchedKeggPathways) +  " Matched Pathways of "  + str(totalKeggPathways) + "in KEGG; Total input Genes = " + str(totalInputMatchedGenes) + "; SUMMARY: Total input Compounds  = " + str(totalInputMatchedCompounds))

        for key in totalFeaturesByOmic:
            logging.info("SUMMARY: Total " + key + " Features = " + str(totalFeaturesByOmic.get(key)))
            logging.info("SUMMARY: Total " + key + " Relevant Features = " + str(totalRelevantFeaturesByOmic.get(key)))

        self.summary= [totalKeggPathways, totalMatchedKeggPathways, totalInputMatchedGenes, totalInputMatchedCompounds, totalFeaturesByOmic, totalRelevantFeaturesByOmic]
        #TODO: REVIEW THE SUMMARY GENERATION
        return self.summary
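
The chunks() helper used above to split the pathway list between worker processes is not defined in this snippet. A minimal sketch of such a helper, yielding consecutive parts of at most `size` pathways, could be:

def chunks(items, size):
    """Yield consecutive slices of at most `size` elements from `items`."""
    for start in range(0, len(items), size):
        yield items[start:start + size]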
Example #43
0
    pw.start()
    pr.start()
    pw.join()
    pr.terminate()

    print '---' * 20
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print num.value
    print arr[:]

    print '--' * 20

    manager = Manager()
    d = manager.dict()
    ll = manager.list(range(10))

    p = Process(target=manage_f, args=(d, ll))
    p2 = Process(target=manage_f2, args=(d, ll))
    p.start()
    p2.start()
    p.join()

    print d
    print ll
Example #44
0
                    help="1 for relative 0 for absolute")
    args = vars(ap.parse_args())
    initializeUser(args['user'])

    global calibration
    calibrationIndexes = [
        'position', 'gain', 'threshL0', 'threshL1', 'getBg', 'threshH', 'yth',
        'plane', 'depth', 'final'
    ]
    calibration = {}
    for i in xrange(len(calibrationIndexes)):
        calibration[calibrationIndexes[i]] = i

    # variables for multi-processing
    manager = Manager()
    key = manager.dict()  # get keyboard events
    kill = manager.dict()  # killing other processes or not

    key['status'] = False
    key['info'] = ""

    kill['kill'] = False

    if args['step'] == 'final':
        p = Process(target=keylogger.log, args=(key, kill))
        p.start()

    global timers
    timers = [Timer() for i in xrange(4)]
    mainLoop(calibration[args['step']], kill, key, args['user'], args['mode'])
Example #45
0
    def run(self):

        if self._mode == 'batch':

            manager = Manager()
            self._logger_state = manager.dict()
            self._node_states = manager.dict()
            self._node_state_update_timestamps = manager.dict()
            self._context = zmq.Context()

            self._session_id = np.random.randint(10000, 11000)
            nodes = []
            node_clients = []

            self._node_token = NodeToken(0, self._context, 'tcp://127.0.0.1', 18534, 28534)

            for i in range(1, self._num_threads+1):
                token = NodeToken(i, self._context, 'tcp://127.0.0.1', 18534 + i, 28534)
                node = NodeClient(token)
                node_clients.append(node)
                self._node_states[token.node_id] = rc.STATUS_CODE_NOT_READY
                self._node_state_update_timestamps[token.node_id] = datetime.now()
                rct = ReCoDeNode(token, self._calibration_frame, self._init_params, self._input_params)
                p = Process(target=rct.run,
                            args=(self._session_id, token,
                                  self._node_states, self._node_state_update_timestamps, self._logger_state))
                nodes.append(p)
                p.start()

            time.sleep(0.1)

            # start logger
            token = NodeToken(-1, self._context, 'tcp://127.0.0.1', -1, 28534)
            logger = Logger(token)
            self._logger_state = {0: rc.STATUS_CODE_NOT_READY}
            logger_process = Process(target=logger.start, args=(self._session_id, self._logger_state))
            logger_process.start()

            time.sleep(0.1)

            # connect to logger
            self._pub_socket = self._context.socket(zmq.PUSH)
            self._pub_socket.connect(self._node_token.ip_address + ":" + str(self._node_token.publishing_port))
            self._log(rc.MESSAGE_TYPE_INFO_RESPONSE, 'Welcome to this session')

            # connect to RC nodes
            for i in range(self._num_threads):
                node_clients[i].connect()

            self._broadcast(node_clients, self._session_id, 1, 'start')
            self._broadcast(node_clients, self._session_id, 2, 'process_file')

            # close RC nodes
            self._broadcast(node_clients, self._session_id, 3, 'close')
            for i in range(self._num_threads):
                node_clients[i].close()
                nodes[i].join()

            # close logger
            self._log(rc.MESSAGE_TYPE_INFO_RESPONSE, 'close')
            logger_process.join()
Example #46
0
def outlier_detection(data, data_type, parameters):
    """
    Performs outlier detection on data of given data_type according to specified parameters.

    Input:
    
        data (dict): input data. 
                     example: data = {1: {'segments': record 1 ECG segments, 'R': record 1 r peaks}, 
                                     2: {'segments': record 2 ECG segments, 'R': record 2 r peaks},
                                     ...}
        
        data_type (string): data type to be analyzed
        
        parameters (dict): filter parameters.
                           Example: parameters = {'method': 'dbscan', ...}
                                                  
    Output:
        
        output (dict): output data where keys correspond to record id numbers.
                       example: output = { 1: {-1: record 1 outlier indexes, '0': record 1 cluster 0 indexes},
                                           2: {-1: record 2 outlier indexes, '0': record 2 cluster 0 indexes},
                                           ...}

    Configurable fields:{"name": "??.??", "config": {}, "inputs": ["data", "data_type", "parameters"], "outputs": ["output"]}

    See Also:

    Notes:

    Example:
       
    """
    if parameters['method'] == 'dbscan':
        method = outlier.outliers_dbscan
    else:
        raise TypeError, "Method %s not implemented." % parameters['method']
    # create work queue
    work_queue = Queue()
    manager = Manager()
    output = manager.dict()
    output['info'] = parameters
    parameters.pop('method')
    # fill queue
    for recid in data.keys():
        if recid == 'info': continue
        work_queue.put({
            'function': method,
            'data': data.get(recid).get('segments'),
            'parameters': parameters,
            'recid': recid
        })
    # create N processes and associate them with the work_queue and do_work function
    processes = [
        Process(target=do_work, args=(
            work_queue,
            output,
        )) for _ in range(NUMBER_PROCESSES)
    ]
    # launch processes
    for p in processes:
        p.start()
    # wait for processes to finish
    print "waiting ..."
    for p in processes:
        p.join()
    print "wait is over ..."
    for p in processes:
        p.terminate()

    return output
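
do_work and NUMBER_PROCESSES come from elsewhere in this module. As an illustration only, a worker compatible with the queue items built above would drain the queue until it is empty and store each result under its record id in the shared output dict; the exact call convention for item['function'] is an assumption:

from Queue import Empty  # "from queue import Empty" on Python 3


def do_work(work_queue, output):
    # Hypothetical worker sketch.
    while True:
        try:
            item = work_queue.get(timeout=1)
        except Empty:                        # nothing left to process
            break
        result = item['function'](item['data'], **item['parameters'])
        output[item['recid']] = result       # Manager dict shared with the parent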
    trials2 = []
    for folder in os.listdir("smooths/second/"):
        for filename in os.listdir("smooths/second/" + folder):
            trials2.append("smooths/second/" + folder + "/" + filename)
    trials3 = []
    for folder in os.listdir("smooths/third/"):
        for filename in os.listdir("smooths/third/" + folder):
            trials3.append("smooths/third/" + folder + "/" + filename)
    corpus = []

    with open(processing.dataset_folder + "test_set.txt") as file:
        for l in file.readlines():
            corpus.append(l.strip())

    manager = Manager()
    returns = manager.dict()
    for id in tqdm(range(0, len(trials1), 5)):

        results = []
        ids = id
        p1 = Process(target=processing.compute_tagging,
                     args=(corpus, first_lex_in, first_lex_out, second_lex_in,
                           second_lex_out, automaton1, automaton2,
                           class_cluster, trials1[ids], trials2[ids], returns,
                           trials1[ids]))
        p1.start()
        ids += 1
        p2 = Process(target=processing.compute_tagging,
                     args=(corpus, first_lex_in, first_lex_out, second_lex_in,
                           second_lex_out, automaton1, automaton2,
                           class_cluster, trials1[ids], trials2[ids], returns,
Example #48
0
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

__author__ = 'but0n'
from multiprocessing import Pool, Manager
from bs4 import BeautifulSoup
import time, random, requests, sqlite3, os

server = Manager()
host = 'http://www.80s.tw'
screen = server.dict({'label' : 'NONE', 'url' : 'http://baidu.com', 'title':'none', 'IMG':'none', 'detail':'none', 'link':'none', 'index':0, 'total':10})
def mLog(opt):
    os.system('clear')
    print('\033[41;30m MESSAGE: %s\033[m' % opt['label'])
    print('\033[46;30m PATH: %10s\033[m\n' % opt['url'])

    print('\033[0;35m TITLE\033[m:\t%s' % opt['title'])
    print('\033[0;35m IMG\033[m:\t%s' % opt['IMG'][:30]+'...')
    print('\033[0;34m DETAIL\033[m:%s' % opt['detail'][:60]+'...')
    print('\033[0;36m LINK\033[m:\t%s' % opt['link'][:60]+'...')

    bar_status = opt['index']*40/opt['total']
    status = opt['index']*100/opt['total']
    print('\n[%-40s]%s(%d/%d)' % ('>'*bar_status, str(status)+'%', opt['index'], opt['total']))


class domPa(object):
    def __init__(self, path, section = 'a', title = '.title', img = '.img', detail = '.detail'):
        self.path = path
        self.page = requests.get(host+path)
Example #49
0

# bar
def bar(name, data):
    print(name)
    for i in range(3):
        data[i] = i
        print("Tick")
        time.sleep(1)
    print(data)


if __name__ == '__main__':
    # Start bar as a process
    manager = Manager()
    data = manager.dict()
    p = multiprocessing.Process(target=bar, args=["test", data])
    p.start()

    # Wait for 5 seconds or until the process finishes
    p.join(5)

    # If the process is still alive
    if p.is_alive():
        print("running... let's kill it...")

        # Terminate
        p.terminate()
        p.join()
    print(data)
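
Terminating a worker that is mid-write can leave the shared dict half-updated. A small variant (a sketch, not from the original) that first asks the worker to stop through a flag in the same Manager dict and only terminates as a last resort:

import time
import multiprocessing
from multiprocessing import Manager


def bar(name, data):
    print(name)
    for i in range(3):
        if data.get('stop'):         # cooperative shutdown requested by the parent
            break
        data[i] = i
        print("Tick")
        time.sleep(1)
    print(data)


if __name__ == '__main__':
    manager = Manager()
    data = manager.dict()
    p = multiprocessing.Process(target=bar, args=["test", data])
    p.start()

    p.join(5)
    if p.is_alive():
        data['stop'] = True          # ask the worker to finish its current step
        p.join(2)
        if p.is_alive():
            p.terminate()            # last resort
            p.join()
    print(data)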
Example #50
0
class SQLDB:

    PRAGMAS = """
        pragma journal_mode=WAL;
    """

    CREATE_CLAIM_TABLE = """
        create table if not exists claim (
            claim_hash bytes primary key,
            claim_id text not null,
            claim_name text not null,
            normalized text not null,
            txo_hash bytes not null,
            tx_position integer not null,
            amount integer not null,
            timestamp integer not null, -- last updated timestamp
            creation_timestamp integer not null,
            height integer not null, -- last updated height
            creation_height integer not null,
            activation_height integer,
            expiration_height integer not null,
            release_time integer not null,

            short_url text not null, -- normalized#shortest-unique-claim_id
            canonical_url text, -- channel's-short_url/normalized#shortest-unique-claim_id-within-channel

            title text,
            author text,
            description text,

            claim_type integer,
            reposted integer default 0,

            -- streams
            stream_type text,
            media_type text,
            fee_amount integer default 0,
            fee_currency text,
            duration integer,

            -- reposts
            reposted_claim_hash bytes,

            -- claims which are channels
            public_key_bytes bytes,
            public_key_hash bytes,
            claims_in_channel integer,

            -- claims which are inside channels
            channel_hash bytes,
            channel_join integer, -- height at which claim got valid signature / joined channel
            signature bytes,
            signature_digest bytes,
            signature_valid bool,

            effective_amount integer not null default 0,
            support_amount integer not null default 0,
            trending_group integer not null default 0,
            trending_mixed integer not null default 0,
            trending_local integer not null default 0,
            trending_global integer not null default 0
        );

        create index if not exists claim_normalized_idx on claim (normalized, activation_height);
        create index if not exists claim_channel_hash_idx on claim (channel_hash, signature, claim_hash);
        create index if not exists claim_claims_in_channel_idx on claim (signature_valid, channel_hash, normalized);
        create index if not exists claim_txo_hash_idx on claim (txo_hash);
        create index if not exists claim_activation_height_idx on claim (activation_height, claim_hash);
        create index if not exists claim_expiration_height_idx on claim (expiration_height);
    """

    CREATE_SUPPORT_TABLE = """
        create table if not exists support (
            txo_hash bytes primary key,
            tx_position integer not null,
            height integer not null,
            claim_hash bytes not null,
            amount integer not null
        );
        create index if not exists support_claim_hash_idx on support (claim_hash, height);
    """

    CREATE_TAG_TABLE = """
        create table if not exists tag (
            tag text not null,
            claim_hash bytes not null,
            height integer not null
        );
        create unique index if not exists tag_claim_hash_tag_idx on tag (claim_hash, tag);
    """

    CREATE_CLAIMTRIE_TABLE = """
        create table if not exists claimtrie (
            normalized text primary key,
            claim_hash bytes not null,
            last_take_over_height integer not null
        );
        create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
    """

    SEARCH_INDEXES = """
        -- used by any tag clouds
        create index if not exists tag_tag_idx on tag (tag, claim_hash);

        -- common ORDER BY
        create unique index if not exists claim_effective_amount_idx on claim (effective_amount, claim_hash, release_time);
        create unique index if not exists claim_release_time_idx on claim (release_time, claim_hash);
        create unique index if not exists claim_trending_global_mixed_idx on claim (trending_global, trending_mixed, claim_hash);
        create unique index if not exists claim_trending_group_mixed_idx on claim (trending_group, trending_mixed, claim_hash);
        create unique index if not exists filter_fee_amount_order_release_time_idx on claim (fee_amount, release_time, claim_hash);

        create unique index if not exists claim_type_trending_idx on claim (claim_type, trending_global, trending_mixed, claim_hash);
        create unique index if not exists claim_type_release_idx on claim (claim_type, release_time, claim_hash);
        create unique index if not exists claim_type_effective_amount_idx on claim (claim_type, effective_amount, claim_hash);

        create unique index if not exists channel_hash_release_time_idx on claim (channel_hash, release_time, claim_hash);
        create unique index if not exists channel_hash_trending_idx on claim (channel_hash, trending_global, trending_mixed, claim_hash);
        create unique index if not exists channel_hash_effective_amount_idx on claim (channel_hash, effective_amount, claim_hash);

        create unique index if not exists filter_stream_duration_idx on claim (duration, trending_global, trending_mixed, claim_hash);

        -- TODO: verify that all indexes below are used
        create index if not exists claim_height_normalized_idx on claim (height, normalized asc);

        create index if not exists claim_resolve_idx on claim (normalized, claim_id);

        create index if not exists claim_id_idx on claim (claim_id, claim_hash);
        create index if not exists claim_timestamp_idx on claim (timestamp);
        create index if not exists claim_public_key_hash_idx on claim (public_key_hash);

        create index if not exists claim_stream_type_idx on claim (stream_type);
        create index if not exists claim_media_type_idx on claim (media_type);

        create index if not exists claim_signature_valid_idx on claim (signature_valid);
    """

    TAG_INDEXES = '\n'.join(
        f"create unique index if not exists tag_{tag_key}_idx on tag (tag, claim_hash) WHERE tag='{tag_value}';"
        for tag_value, tag_key in COMMON_TAGS.items())

    CREATE_TABLES_QUERY = (CREATE_CLAIM_TABLE + CREATE_FULL_TEXT_SEARCH +
                           CREATE_SUPPORT_TABLE + CREATE_CLAIMTRIE_TABLE +
                           CREATE_TAG_TABLE)

    def __init__(self, main, path: str, blocking_channels: list,
                 filtering_channels: list, trending: list):
        self.main = main
        self._db_path = path
        self.db = None
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.ledger = Ledger if main.coin.NET == 'mainnet' else RegTestLedger
        self._fts_synced = False
        self.state_manager = None
        self.blocked_streams = None
        self.blocked_channels = None
        self.blocking_channel_hashes = {
            unhexlify(channel_id)[::-1]
            for channel_id in blocking_channels if channel_id
        }
        self.filtered_streams = None
        self.filtered_channels = None
        self.filtering_channel_hashes = {
            unhexlify(channel_id)[::-1]
            for channel_id in filtering_channels if channel_id
        }
        self.trending = trending

    def open(self):
        self.db = apsw.Connection(
            self._db_path,
            flags=(apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE
                   | apsw.SQLITE_OPEN_URI))

        def exec_factory(cursor, statement, bindings):
            tpl = namedtuple('row', (d[0] for d in cursor.getdescription()))
            cursor.setrowtrace(lambda cursor, row: tpl(*row))
            return True

        self.db.setexectrace(exec_factory)
        self.execute(self.PRAGMAS)
        self.execute(self.CREATE_TABLES_QUERY)
        register_canonical_functions(self.db)
        self.state_manager = Manager()
        self.blocked_streams = self.state_manager.dict()
        self.blocked_channels = self.state_manager.dict()
        self.filtered_streams = self.state_manager.dict()
        self.filtered_channels = self.state_manager.dict()
        self.update_blocked_and_filtered_claims()
        for algorithm in self.trending:
            algorithm.install(self.db)

    def close(self):
        if self.db is not None:
            self.db.close()
        if self.state_manager is not None:
            self.state_manager.shutdown()

    def update_blocked_and_filtered_claims(self):
        self.update_claims_from_channel_hashes(self.blocked_streams,
                                               self.blocked_channels,
                                               self.blocking_channel_hashes)
        self.update_claims_from_channel_hashes(self.filtered_streams,
                                               self.filtered_channels,
                                               self.filtering_channel_hashes)
        self.filtered_streams.update(self.blocked_streams)
        self.filtered_channels.update(self.blocked_channels)

    def update_claims_from_channel_hashes(self, shared_streams,
                                          shared_channels, channel_hashes):
        streams, channels = {}, {}
        if channel_hashes:
            sql = query(
                "SELECT claim.channel_hash, claim.reposted_claim_hash, reposted.claim_type "
                "FROM claim JOIN claim AS reposted ON (reposted.claim_hash=claim.reposted_claim_hash)",
                **{
                    'claim.reposted_claim_hash__is_not_null': 1,
                    'claim.channel_hash__in': channel_hashes
                })
            for blocked_claim in self.execute(*sql):
                if blocked_claim.claim_type == CLAIM_TYPES['stream']:
                    streams[blocked_claim.
                            reposted_claim_hash] = blocked_claim.channel_hash
                elif blocked_claim.claim_type == CLAIM_TYPES['channel']:
                    channels[blocked_claim.
                             reposted_claim_hash] = blocked_claim.channel_hash
        shared_streams.clear()
        shared_streams.update(streams)
        shared_channels.clear()
        shared_channels.update(channels)

    @staticmethod
    def _insert_sql(table: str, data: dict) -> Tuple[str, list]:
        columns, values = [], []
        for column, value in data.items():
            columns.append(column)
            values.append(value)
        sql = (f"INSERT INTO {table} ({', '.join(columns)}) "
               f"VALUES ({', '.join(['?'] * len(values))})")
        return sql, values
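
    # Illustration only (not part of the original source): given hypothetical input,
    #   SQLDB._insert_sql("support", {"txo_hash": b"aa", "amount": 5})
    # returns
    #   ("INSERT INTO support (txo_hash, amount) VALUES (?, ?)", [b"aa", 5])
    # and the pair can be unpacked straight into self.execute(*pair).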

    @staticmethod
    def _update_sql(table: str, data: dict, where: str,
                    constraints: Union[list, tuple]) -> Tuple[str, list]:
        columns, values = [], []
        for column, value in data.items():
            columns.append(f"{column} = ?")
            values.append(value)
        values.extend(constraints)
        return f"UPDATE {table} SET {', '.join(columns)} WHERE {where}", values

    @staticmethod
    def _delete_sql(table: str, constraints: dict) -> Tuple[str, dict]:
        where, values = constraints_to_sql(constraints)
        return f"DELETE FROM {table} WHERE {where}", values

    def execute(self, *args):
        return self.db.cursor().execute(*args)

    def executemany(self, *args):
        return self.db.cursor().executemany(*args)

    def begin(self):
        self.execute('begin;')

    def commit(self):
        self.execute('commit;')

    def _upsertable_claims(self,
                           txos: List[Output],
                           header,
                           clear_first=False):
        claim_hashes, claims, tags = set(), [], {}
        for txo in txos:
            tx = txo.tx_ref.tx

            try:
                assert txo.claim_name
                assert txo.normalized_name
            except:
                #self.logger.exception(f"Could not decode claim name for {tx.id}:{txo.position}.")
                continue

            claim_hash = txo.claim_hash
            claim_hashes.add(claim_hash)
            claim_record = {
                'claim_hash': claim_hash,
                'claim_id': txo.claim_id,
                'claim_name': txo.claim_name,
                'normalized': txo.normalized_name,
                'txo_hash': txo.ref.hash,
                'tx_position': tx.position,
                'amount': txo.amount,
                'timestamp': header['timestamp'],
                'height': tx.height,
                'title': None,
                'description': None,
                'author': None,
                'duration': None,
                'claim_type': None,
                'stream_type': None,
                'media_type': None,
                'release_time': None,
                'fee_currency': None,
                'fee_amount': 0,
                'reposted_claim_hash': None
            }
            claims.append(claim_record)

            try:
                claim = txo.claim
            except:
                #self.logger.exception(f"Could not parse claim protobuf for {tx.id}:{txo.position}.")
                continue

            if claim.is_stream:
                claim_record['claim_type'] = CLAIM_TYPES['stream']
                claim_record['media_type'] = claim.stream.source.media_type
                claim_record['stream_type'] = STREAM_TYPES[guess_stream_type(
                    claim_record['media_type'])]
                claim_record['title'] = claim.stream.title
                claim_record['description'] = claim.stream.description
                claim_record['author'] = claim.stream.author
                if claim.stream.video and claim.stream.video.duration:
                    claim_record['duration'] = claim.stream.video.duration
                if claim.stream.audio and claim.stream.audio.duration:
                    claim_record['duration'] = claim.stream.audio.duration
                if claim.stream.release_time:
                    claim_record['release_time'] = claim.stream.release_time
                if claim.stream.has_fee:
                    fee = claim.stream.fee
                    if isinstance(fee.currency, str):
                        claim_record['fee_currency'] = fee.currency.lower()
                    if isinstance(fee.amount, Decimal):
                        claim_record['fee_amount'] = int(fee.amount * 1000)
            elif claim.is_repost:
                claim_record[
                    'reposted_claim_hash'] = claim.repost.reference.claim_hash
            elif claim.is_channel:
                claim_record['claim_type'] = CLAIM_TYPES['channel']

            for tag in clean_tags(claim.message.tags):
                tags[(tag, claim_hash)] = (tag, claim_hash, tx.height)

        if clear_first:
            self._clear_claim_metadata(claim_hashes)

        if tags:
            self.executemany(
                "INSERT OR IGNORE INTO tag (tag, claim_hash, height) VALUES (?, ?, ?)",
                tags.values())

        return claims

    def insert_claims(self, txos: List[Output], header):
        claims = self._upsertable_claims(txos, header)
        if claims:
            self.executemany(
                """
                INSERT OR IGNORE INTO claim (
                    claim_hash, claim_id, claim_name, normalized, txo_hash, tx_position, amount,
                    claim_type, media_type, stream_type, timestamp, creation_timestamp,
                    fee_currency, fee_amount, title, description, author, duration, height, reposted_claim_hash,
                    creation_height, release_time, activation_height, expiration_height, short_url)
                VALUES (
                    :claim_hash, :claim_id, :claim_name, :normalized, :txo_hash, :tx_position, :amount,
                    :claim_type, :media_type, :stream_type, :timestamp, :timestamp,
                    :fee_currency, :fee_amount, :title, :description, :author, :duration, :height, :reposted_claim_hash, :height,
                    CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE :timestamp END,
                    CASE WHEN :normalized NOT IN (SELECT normalized FROM claimtrie) THEN :height END,
                    CASE WHEN :height >= 137181 THEN :height+2102400 ELSE :height+262974 END,
                    :claim_name||COALESCE(
                        (SELECT shortest_id(claim_id, :claim_id) FROM claim WHERE normalized = :normalized),
                        '#'||substr(:claim_id, 1, 1)
                    )
                )""", claims)

    def update_claims(self, txos: List[Output], header):
        claims = self._upsertable_claims(txos, header, clear_first=True)
        if claims:
            self.executemany(
                """
                UPDATE claim SET
                    txo_hash=:txo_hash, tx_position=:tx_position, amount=:amount, height=:height,
                    claim_type=:claim_type, media_type=:media_type, stream_type=:stream_type,
                    timestamp=:timestamp, fee_amount=:fee_amount, fee_currency=:fee_currency,
                    title=:title, duration=:duration, description=:description, author=:author, reposted_claim_hash=:reposted_claim_hash,
                    release_time=CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE release_time END
                WHERE claim_hash=:claim_hash;
                """, claims)

    def delete_claims(self, claim_hashes: Set[bytes]):
        """ Deletes claim supports and from claimtrie in case of an abandon. """
        if claim_hashes:
            affected_channels = self.execute(
                *query("SELECT channel_hash FROM claim",
                       channel_hash__is_not_null=1,
                       claim_hash__in=claim_hashes)).fetchall()
            for table in ('claim', 'support', 'claimtrie'):
                self.execute(
                    *self._delete_sql(table, {'claim_hash__in': claim_hashes}))
            self._clear_claim_metadata(claim_hashes)
            return {r.channel_hash for r in affected_channels}
        return set()

    def _clear_claim_metadata(self, claim_hashes: Set[bytes]):
        if claim_hashes:
            for table in ('tag', ):  # 'language', 'location', etc
                self.execute(
                    *self._delete_sql(table, {'claim_hash__in': claim_hashes}))

    def split_inputs_into_claims_supports_and_other(self, txis):
        txo_hashes = {txi.txo_ref.hash for txi in txis}
        claims = self.execute(
            *query("SELECT txo_hash, claim_hash, normalized FROM claim",
                   txo_hash__in=txo_hashes)).fetchall()
        txo_hashes -= {r.txo_hash for r in claims}
        supports = {}
        if txo_hashes:
            supports = self.execute(
                *query("SELECT txo_hash, claim_hash FROM support",
                       txo_hash__in=txo_hashes)).fetchall()
            txo_hashes -= {r.txo_hash for r in supports}
        return claims, supports, txo_hashes

    def insert_supports(self, txos: List[Output]):
        supports = []
        for txo in txos:
            tx = txo.tx_ref.tx
            supports.append((txo.ref.hash, tx.position, tx.height,
                             txo.claim_hash, txo.amount))
        if supports:
            self.executemany(
                "INSERT OR IGNORE INTO support ("
                "   txo_hash, tx_position, height, claim_hash, amount"
                ") "
                "VALUES (?, ?, ?, ?, ?)", supports)

    def delete_supports(self, txo_hashes: Set[bytes]):
        if txo_hashes:
            self.execute(
                *self._delete_sql('support', {'txo_hash__in': txo_hashes}))

    def calculate_reposts(self, txos: List[Output]):
        targets = set()
        for txo in txos:
            try:
                claim = txo.claim
            except:
                continue
            if claim.is_repost:
                targets.add((claim.repost.reference.claim_hash, ))
        if targets:
            self.executemany(
                """
                UPDATE claim SET reposted = (
                    SELECT count(*) FROM claim AS repost WHERE repost.reposted_claim_hash = claim.claim_hash
                )
                WHERE claim_hash = ?
                """, targets)

    def validate_channel_signatures(self, height, new_claims, updated_claims,
                                    spent_claims, affected_channels, timer):
        if not new_claims and not updated_claims and not spent_claims:
            return

        sub_timer = timer.add_timer('segregate channels and signables')
        sub_timer.start()
        channels, new_channel_keys, signables = {}, {}, {}
        for txo in chain(new_claims, updated_claims):
            try:
                claim = txo.claim
            except:
                continue
            if claim.is_channel:
                channels[txo.claim_hash] = txo
                new_channel_keys[
                    txo.claim_hash] = claim.channel.public_key_bytes
            else:
                signables[txo.claim_hash] = txo
        sub_timer.stop()

        sub_timer = timer.add_timer('make list of channels we need to lookup')
        sub_timer.start()
        missing_channel_keys = set()
        for txo in signables.values():
            claim = txo.claim
            if claim.is_signed and claim.signing_channel_hash not in new_channel_keys:
                missing_channel_keys.add(claim.signing_channel_hash)
        sub_timer.stop()

        sub_timer = timer.add_timer('lookup missing channels')
        sub_timer.start()
        all_channel_keys = {}
        if new_channel_keys or missing_channel_keys or affected_channels:
            all_channel_keys = dict(
                self.execute(
                    *query("SELECT claim_hash, public_key_bytes FROM claim",
                           claim_hash__in=set(new_channel_keys)
                           | missing_channel_keys | affected_channels)))
        sub_timer.stop()

        sub_timer = timer.add_timer('prepare for updating claims')
        sub_timer.start()
        changed_channel_keys = {}
        for claim_hash, new_key in new_channel_keys.items():
            if claim_hash not in all_channel_keys or all_channel_keys[
                    claim_hash] != new_key:
                all_channel_keys[claim_hash] = new_key
                changed_channel_keys[claim_hash] = new_key

        claim_updates = []

        for claim_hash, txo in signables.items():
            claim = txo.claim
            update = {
                'claim_hash': claim_hash,
                'channel_hash': None,
                'signature': None,
                'signature_digest': None,
                'signature_valid': None
            }
            if claim.is_signed:
                update.update({
                    'channel_hash':
                    claim.signing_channel_hash,
                    'signature':
                    txo.get_encoded_signature(),
                    'signature_digest':
                    txo.get_signature_digest(self.ledger),
                    'signature_valid':
                    0
                })
            claim_updates.append(update)
        sub_timer.stop()

        sub_timer = timer.add_timer(
            'find claims affected by a change in channel key')
        sub_timer.start()
        if changed_channel_keys:
            sql = f"""
            SELECT * FROM claim WHERE
                channel_hash IN ({','.join('?' for _ in changed_channel_keys)}) AND
                signature IS NOT NULL
            """
            for affected_claim in self.execute(sql,
                                               changed_channel_keys.keys()):
                if affected_claim.claim_hash not in signables:
                    claim_updates.append({
                        'claim_hash': affected_claim.claim_hash,
                        'channel_hash': affected_claim.channel_hash,
                        'signature': affected_claim.signature,
                        'signature_digest': affected_claim.signature_digest,
                        'signature_valid': 0
                    })
        sub_timer.stop()

        sub_timer = timer.add_timer('verify signatures')
        sub_timer.start()
        for update in claim_updates:
            channel_pub_key = all_channel_keys.get(update['channel_hash'])
            if channel_pub_key and update['signature']:
                update['signature_valid'] = Output.is_signature_valid(
                    bytes(update['signature']),
                    bytes(update['signature_digest']), channel_pub_key)
        sub_timer.stop()

        sub_timer = timer.add_timer('update claims')
        sub_timer.start()
        if claim_updates:
            self.executemany(
                f"""
                UPDATE claim SET 
                    channel_hash=:channel_hash, signature=:signature, signature_digest=:signature_digest,
                    signature_valid=:signature_valid,
                    channel_join=CASE
                        WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN channel_join
                        WHEN :signature_valid=1 THEN {height}
                    END,
                    canonical_url=CASE
                        WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN canonical_url
                        WHEN :signature_valid=1 THEN
                            (SELECT short_url FROM claim WHERE claim_hash=:channel_hash)||'/'||
                            claim_name||COALESCE(
                                (SELECT shortest_id(other_claim.claim_id, claim.claim_id) FROM claim AS other_claim
                                 WHERE other_claim.signature_valid = 1 AND
                                       other_claim.channel_hash = :channel_hash AND
                                       other_claim.normalized = claim.normalized),
                                '#'||substr(claim_id, 1, 1)
                            )
                    END
                WHERE claim_hash=:claim_hash;
                """, claim_updates)
        sub_timer.stop()

        sub_timer = timer.add_timer('update claims affected by spent channels')
        sub_timer.start()
        if spent_claims:
            self.execute(
                f"""
                UPDATE claim SET
                    signature_valid=CASE WHEN signature IS NOT NULL THEN 0 END,
                    channel_join=NULL, canonical_url=NULL
                WHERE channel_hash IN ({','.join('?' for _ in spent_claims)})
                """, spent_claims)
        sub_timer.stop()

        sub_timer = timer.add_timer('update channels')
        sub_timer.start()
        if channels:
            self.executemany(
                """
                UPDATE claim SET
                    public_key_bytes=:public_key_bytes,
                    public_key_hash=:public_key_hash
                WHERE claim_hash=:claim_hash""", [{
                    'claim_hash':
                    claim_hash,
                    'public_key_bytes':
                    txo.claim.channel.public_key_bytes,
                    'public_key_hash':
                    self.ledger.address_to_hash160(
                        self.ledger.public_key_to_address(
                            txo.claim.channel.public_key_bytes))
                } for claim_hash, txo in channels.items()])
        sub_timer.stop()

        sub_timer = timer.add_timer('update claims_in_channel counts')
        sub_timer.start()
        if all_channel_keys:
            self.executemany(
                f"""
                UPDATE claim SET
                    claims_in_channel=(
                        SELECT COUNT(*) FROM claim AS claim_in_channel
                        WHERE claim_in_channel.signature_valid=1 AND
                              claim_in_channel.channel_hash=claim.claim_hash
                    )
                WHERE claim_hash = ?
            """,
                [(channel_hash, ) for channel_hash in all_channel_keys.keys()])
        sub_timer.stop()

        sub_timer = timer.add_timer('update blocked claims list')
        sub_timer.start()
        if (self.blocking_channel_hashes.intersection(all_channel_keys) or
                self.filtering_channel_hashes.intersection(all_channel_keys)):
            self.update_blocked_and_filtered_claims()
        sub_timer.stop()

    def _update_support_amount(self, claim_hashes):
        if claim_hashes:
            self.execute(
                f"""
                UPDATE claim SET
                    support_amount = COALESCE(
                        (SELECT SUM(amount) FROM support WHERE support.claim_hash=claim.claim_hash), 0
                    )
                WHERE claim_hash IN ({','.join('?' for _ in claim_hashes)})
            """, claim_hashes)

    def _update_effective_amount(self, height, claim_hashes=None):
        self.execute(
            f"UPDATE claim SET effective_amount = amount + support_amount "
            f"WHERE activation_height = {height}")
        if claim_hashes:
            self.execute(
                f"UPDATE claim SET effective_amount = amount + support_amount "
                f"WHERE activation_height < {height} "
                f"  AND claim_hash IN ({','.join('?' for _ in claim_hashes)})",
                claim_hashes)

    def _calculate_activation_height(self, height):
        last_take_over_height = f"""COALESCE(
            (SELECT last_take_over_height FROM claimtrie
            WHERE claimtrie.normalized=claim.normalized),
            {height}
        )
        """
        self.execute(f"""
            UPDATE claim SET activation_height = 
                {height} + min(4032, cast(({height} - {last_take_over_height}) / 32 AS INT))
            WHERE activation_height IS NULL
        """)

    def _perform_overtake(self, height, changed_claim_hashes, deleted_names):
        deleted_names_sql = claim_hashes_sql = ""
        if changed_claim_hashes:
            claim_hashes_sql = f"OR claim_hash IN ({','.join('?' for _ in changed_claim_hashes)})"
        if deleted_names:
            deleted_names_sql = f"OR normalized IN ({','.join('?' for _ in deleted_names)})"
        overtakes = self.execute(
            f"""
            SELECT winner.normalized, winner.claim_hash,
                   claimtrie.claim_hash AS current_winner,
                   MAX(winner.effective_amount) AS max_winner_effective_amount
            FROM (
                SELECT normalized, claim_hash, effective_amount FROM claim
                WHERE normalized IN (
                    SELECT normalized FROM claim WHERE activation_height={height} {claim_hashes_sql}
                ) {deleted_names_sql}
                ORDER BY effective_amount DESC, height ASC, tx_position ASC
            ) AS winner LEFT JOIN claimtrie USING (normalized)
            GROUP BY winner.normalized
            HAVING current_winner IS NULL OR current_winner <> winner.claim_hash
        """,
            list(changed_claim_hashes) + deleted_names)
        for overtake in overtakes:
            if overtake.current_winner:
                self.execute(
                    f"UPDATE claimtrie SET claim_hash = ?, last_take_over_height = {height} "
                    f"WHERE normalized = ?",
                    (overtake.claim_hash, overtake.normalized))
            else:
                self.execute(
                    f"INSERT INTO claimtrie (claim_hash, normalized, last_take_over_height) "
                    f"VALUES (?, ?, {height})",
                    (overtake.claim_hash, overtake.normalized))
            self.execute(
                f"UPDATE claim SET activation_height = {height} WHERE normalized = ? "
                f"AND (activation_height IS NULL OR activation_height > {height})",
                (overtake.normalized, ))

    def _copy(self, height):
        if height > 50:
            self.execute(f"DROP TABLE claimtrie{height-50}")
        self.execute(
            f"CREATE TABLE claimtrie{height} AS SELECT * FROM claimtrie")

    def update_claimtrie(self, height, changed_claim_hashes, deleted_names,
                         timer):
        r = timer.run

        r(self._calculate_activation_height, height)
        r(self._update_support_amount, changed_claim_hashes)

        r(self._update_effective_amount, height, changed_claim_hashes)
        r(self._perform_overtake, height, changed_claim_hashes,
          list(deleted_names))

        r(self._update_effective_amount, height)
        r(self._perform_overtake, height, [], [])

    def get_expiring(self, height):
        return self.execute(
            f"SELECT claim_hash, normalized FROM claim WHERE expiration_height = {height}"
        )

    def advance_txs(self, height, all_txs, header, daemon_height, timer):
        insert_claims = []
        update_claims = []
        update_claim_hashes = set()
        delete_claim_hashes = set()
        insert_supports = []
        delete_support_txo_hashes = set()
        recalculate_claim_hashes = set()  # added/deleted supports, added/updated claim
        deleted_claim_names = set()
        delete_others = set()
        body_timer = timer.add_timer('body')
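        # First pass over the block: spent inputs mark claims/supports for deletion
        # and queue the affected claims for support recalculation; outputs queue new
        # claims, claim updates and supports for insertion.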
        for position, (etx, txid) in enumerate(all_txs):
            tx = timer.run(Transaction,
                           etx.raw,
                           height=height,
                           position=position)
            # Inputs
            spent_claims, spent_supports, spent_others = timer.run(
                self.split_inputs_into_claims_supports_and_other, tx.inputs)
            body_timer.start()
            delete_claim_hashes.update({r.claim_hash for r in spent_claims})
            deleted_claim_names.update({r.normalized for r in spent_claims})
            delete_support_txo_hashes.update({r.txo_hash for r in spent_supports})
            recalculate_claim_hashes.update({r.claim_hash for r in spent_supports})
            delete_others.update(spent_others)
            # Outputs
            for output in tx.outputs:
                if output.is_support:
                    insert_supports.append(output)
                    recalculate_claim_hashes.add(output.claim_hash)
                elif output.script.is_claim_name:
                    insert_claims.append(output)
                    recalculate_claim_hashes.add(output.claim_hash)
                elif output.script.is_update_claim:
                    claim_hash = output.claim_hash
                    update_claims.append(output)
                    recalculate_claim_hashes.add(claim_hash)
            body_timer.stop()

        skip_update_claim_timer = timer.add_timer(
            'skip update of abandoned claims')
        skip_update_claim_timer.start()
        for updated_claim in list(update_claims):
            if updated_claim.ref.hash in delete_others:
                update_claims.remove(updated_claim)
        for updated_claim in update_claims:
            claim_hash = updated_claim.claim_hash
            delete_claim_hashes.discard(claim_hash)
            update_claim_hashes.add(claim_hash)
        skip_update_claim_timer.stop()

        skip_insert_claim_timer = timer.add_timer(
            'skip insertion of abandoned claims')
        skip_insert_claim_timer.start()
        for new_claim in list(insert_claims):
            if new_claim.ref.hash in delete_others:
                if new_claim.claim_hash not in update_claim_hashes:
                    insert_claims.remove(new_claim)
        skip_insert_claim_timer.stop()

        skip_insert_support_timer = timer.add_timer(
            'skip insertion of abandoned supports')
        skip_insert_support_timer.start()
        for new_support in list(insert_supports):
            if new_support.ref.hash in delete_others:
                insert_supports.remove(new_support)
        skip_insert_support_timer.stop()

        expire_timer = timer.add_timer('recording expired claims')
        expire_timer.start()
        for expired in self.get_expiring(height):
            delete_claim_hashes.add(expired.claim_hash)
            deleted_claim_names.add(expired.normalized)
        expire_timer.stop()

        r = timer.run
        r(update_full_text_search, 'before-delete', delete_claim_hashes,
          self.db.cursor(), self.main.first_sync)
        affected_channels = r(self.delete_claims, delete_claim_hashes)
        r(self.delete_supports, delete_support_txo_hashes)
        r(self.insert_claims, insert_claims, header)
        r(self.calculate_reposts, insert_claims)
        r(update_full_text_search,
          'after-insert', [txo.claim_hash for txo in insert_claims],
          self.db.cursor(), self.main.first_sync)
        r(update_full_text_search,
          'before-update', [txo.claim_hash for txo in update_claims],
          self.db.cursor(), self.main.first_sync)
        r(self.update_claims, update_claims, header)
        r(update_full_text_search,
          'after-update', [txo.claim_hash for txo in update_claims],
          self.db.cursor(), self.main.first_sync)
        r(self.validate_channel_signatures,
          height,
          insert_claims,
          update_claims,
          delete_claim_hashes,
          affected_channels,
          forward_timer=True)
        r(self.insert_supports, insert_supports)
        r(self.update_claimtrie,
          height,
          recalculate_claim_hashes,
          deleted_claim_names,
          forward_timer=True)
        for algorithm in self.trending:
            r(algorithm.run, self.db.cursor(), height, daemon_height,
              recalculate_claim_hashes)
        if not self._fts_synced and self.main.first_sync and height == daemon_height:
            r(first_sync_finished, self.db.cursor())
            self._fts_synced = True
Example #51
0
class SIB():
    """Share Is Beautiful."""
    def __init__(self):
        logging.basicConfig(
            filename='SIB.log',
            format=
            '%(asctime)s | %(process)d | %(processName)s | %(levelname)s | %(module)s | %(funcName)s | %(message)s',
            level=logging.DEBUG)

        #shared objects for inter-process communication
        #TODO: WTH is there so much shared state?
        self.raw_in_queue = Queue()
        self.json_call_queue = Queue()
        self.json_out_queue = Queue()
        self.manager = Manager()
        self.json_response_dict = self.manager.dict()
        #self.listen_sockets = self.manager.list()
        self.new_sockets = Queue()
        self.congest_sent_queue = Queue()
        self.congest_recv_queue = Queue()
        self.congest_connect_state = self.manager.dict()

        self.ss = SocketServer()
        #self.ss.listen_sockets = self.listen_sockets
        self.ss.new_sockets = self.new_sockets
        self.ss.raw_in_queue = self.raw_in_queue

        self.pp = PacketPrePostprocessor()
        self.pp.raw_in_queue = self.raw_in_queue
        self.pp.json_queue = self.json_call_queue
        self.pp.json_response_dict = self.json_response_dict
        self.pp.json_out_queue = self.json_out_queue
        self.pp.congest_sent_queue = self.congest_sent_queue
        self.pp.congest_recv_queue = self.congest_recv_queue

        self.js = JSONServer()
        self.js.json_call_queue = self.json_call_queue
        self.js.json_response_dict = self.json_response_dict
        self.js.json_out_queue = self.json_out_queue
        self.js.congest_connect_state = self.congest_connect_state

        self.congest = CongestionManager()
        self.congest.shared_sent_queue = self.congest_sent_queue
        self.congest.shared_recv_queue = self.congest_recv_queue
        self.congest.connect_summary = self.congest_connect_state

    def display(self):
        """Prints contents of SIB for debug purposes.
		"""
        #self.raw_in_queue = Queue()
        #self.json_call_queue = Queue()
        #self.json_out_queue = Queue()
        #self.json_response_dict = self.manager.dict()
        print '=========Printing details of SIB object========='
        print "raw_in_queue size: %s" % (self.raw_in_queue.qsize())
        print "json_call_queue size: %s" % (self.json_call_queue.qsize())
        print "json_out_queuesize: %s" % (self.json_out_queue.qsize())
        print "json_response_dict  size: %s" % (len(self.json_response_dict))
        print '------------------------------------------------------'

    def run_once(self):
        self.ss.serve_once()
        self.pp.run_pre_once()
        self.js.serve_once()
        self.pp.run_post_once()

    def run_forever(self):
        logging.info('serving forever')
        self.ss_proc = self.ss.serve_forever_process()
        self.pp.start_pre_pool(2)
        self.js.start_pool(2)
        self.pp.start_post_pool(2)
        self.congest_proc = self.congest.serve_forever_process()

    def terminate(self):
        logging.info('terminating all processes')
        self.ss_proc.terminate()
        self.congest_proc.terminate()
        self.pp.terminate()
        self.js.terminate()

    def process_for(self, t):
        """Continue processing for t seconds.
		"""
        end = time.time() + t
        while (time.time() < end):
            self.process_all()

    def process_all(self):
        """Continue processing packets if they are available.
		If there is nothing to process, wait 0.01 seconds for
		something to process.  If nothing shows up then return.
		"""
        logging.debug('starting process all')
        packets_processed = 0
        should_wait = True
        while (True):
            did_work = False
            if (self.ss.serve_once()):
                did_work = True
            if (self.pp.run_pre_once()):
                did_work = True
            if (self.js.serve_once()):
                packets_processed += 1
                did_work = True
            if (self.pp.run_post_once()):
                did_work = True
            self.congest.run_once()
            if did_work:
                should_wait = True
                continue
            if not should_wait:
                break
            time.sleep(0.01)
            should_wait = False
        logging.debug('packets processed: %d', packets_processed)
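
A minimal driver for the class above might look like the following sketch; it is not part of the original source and simply exercises the single-step helpers instead of the long-lived worker processes.

# Hypothetical driver for the SIB pipeline (not in the original source).
if __name__ == '__main__':
    sib = SIB()
    sib.process_for(10)  # pump packets through the single-step helpers for 10 s
    sib.display()        # print queue sizes for debugging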
Example #52
0
                                td_error = self.ids[aid].critic.learn(
                                    exp[0], exp[3], exp[1])
                                self.ids[aid].actor.learn(
                                    exp[4], exp[2], td_error)
                    f_num += 1
            self.memory['experience_pool_%d' % gpu_id] = experience_pool_new
            e_e.set()
            s_e.clear()


if __name__ == '__main__':
    with tf.device('/cpu:0'):
        game = Game()

        manager = Manager()
        memory = manager.dict()

        states = collections.defaultdict(list)
        for i in range(game.N):
            for j in range(len(game.generator.matrix[i])):
                if game.generator.matrix[i][j] == 1:
                    states[i].append(0)
            node = game.ids[i]
            for d in node.table:
                states[i].append(0)
            for d in node.table_peer:
                states[i].append(0)
            for d in node.table_provider:
                states[i].append(0)
        memory['states'] = states
Example #53
0
def ecg_segmentation(data, parameters):
    """
    Performs ECG segmentation on data according to specified parameters.

    Input:
    
        data (dict): input data. 
                     example: data = {1: record 1 data, 2: record 2 data}
        
        parameters (dict): filter parameters.
                           Example: parameters = {'model': 'engzee'}
                                                  
    Output:
        
        output (dict): output data where keys correspond to record id numbers.
                       example: output = { 1: {'segments': record 1 signal segments, 'R': record 1 r peak indexes},
                                           2: {'segments': record 2 signal segments, 'R': record 2 r peak indexes}}

    Configurable fields:{"name": "??.??", "config": {}, "inputs": ["data", "parameters"], "outputs": ["output"]}

    See Also:

    Notes:

    Example:
       
    """
    if parameters['model'] == 'engzee':
        model = ecgmodule.batch_engzee
    else:
        raise TypeError, "Model %s not implemented." % parameters['model']
    # create work queue
    work_queue = Queue()
    manager = Manager()
    output = manager.dict()
    output['info'] = parameters
    parameters.pop('model')
    # fill queue
    for recid in data.keys():
        if recid == 'info': continue
        work_queue.put({
            'function': model,
            'data': data.get(recid),
            'parameters': parameters,
            'recid': recid
        })
    # create N processes and associate them with the work_queue and the segmentation_work function
    processes = [
        Process(target=segmentation_work, args=(
            work_queue,
            output,
        )) for _ in range(NUMBER_PROCESSES)
    ]
    # launch processes
    for p in processes:
        p.start()
    # wait for processes to finish
    print "waiting ..."
    for p in processes:
        p.join()
    print "waiting is over"
    for p in processes:
        p.terminate()

    return output
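
A small usage sketch for the function above follows; it is not from the original module. The loader `load_ecg_record` and the file names are assumptions, while the input/output layout follows the docstring.

# Hypothetical usage of ecg_segmentation (loader and file names are made up).
if __name__ == '__main__':
    data = {1: load_ecg_record('record1.txt'),
            2: load_ecg_record('record2.txt')}
    output = ecg_segmentation(data, {'model': 'engzee'})
    print output[1]['R']  # R-peak indexes detected for record 1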
Example #54
0
    def _handle(self, context):
        millis = context.args['duration']
        time_range = DateTimeRangeModel(millis * 1000)  # microseconds
        return {'range': time_range}


"""
Resource endpoints.
We create a multiprocessing memory manager and shared
dict to enable multithreaded support.
This 'resource' data is used to test patching.
"""

BASE_RESOURCE = {'key': 'value'}
MANAGER = Manager()
RESOURCE = MANAGER.dict(BASE_RESOURCE)


class GetResourceEndpoint(Endpoint):
    """Returns the 'resource' as it exists in memory.
    """

    _uri = "/resource"
    _http_method = 'GET'
    _route_name = "resource_get"

    _returns = DebugResource("app resource.")

    def _handle(self, context):
        return dict(RESOURCE)
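
A hypothetical companion endpoint is sketched below; it is not part of the original source. It mirrors the conventions of GetResourceEndpoint and assumes that `context.args` carries the parsed request body.

class PatchResourceEndpoint(Endpoint):
    """Hypothetical endpoint: merges the request body into the shared resource.

    Because RESOURCE is a Manager dict proxy, the update is visible to every
    worker process serving requests.
    """

    _uri = "/resource"
    _http_method = 'PATCH'
    _route_name = "resource_patch"

    _returns = DebugResource("app resource.")

    def _handle(self, context):
        RESOURCE.update(context.args)  # assumption: args holds the PATCH body
        return dict(RESOURCE)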
Example #55
0
                                    #consensus_output.write(str(element) + "\n")
                                    with open(str(consensus_dict[element]),
                                              "r") as consensus:
                                        #next(consensus)
                                        for line in consensus:
                                            consensus_output.write(line)

#headings_output.close()

                        print sampleQueue

if __name__ == "__main__":
    print "THIS IS MAIN FUNCTION"
    manager = Manager()
    freeze_support()
    passed_dictionary = manager.dict()

    consensus_string = filename + "_Quasi_strains.fasta"
    finalconsensus_string = filename + "_final" + "_Quasi_strains.fasta"

    clearing_string = open((filename + "_Quasi_strains.fasta"), "w")
    clearing_string.close()

    clearing_consensusstring = open(finalconsensus_string, "w")
    clearing_consensusstring.close()

    headings_string = filename + "_leaves"
    finalheadings_string = filename + "_final" + "_leaves"

    clearing_headingsstring = open(headings_string, "w")
    clearing_headingsstring.close()
Example #56
0
    def run(self):

        # -------------------------------------------------------------------------------------
        # Set output data folder
        self.sOutPath = setOutFolder(self.sFileName, self.sOutPath)
        # Set land variable(s) information
        self.oVarsDict = setVarsInfo(self.oVarsDict, self.sOutPath,
                                     self.sDomainName)
        # Info progress algorithm
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Read filename and get DEM, LONGITUDE and LATITUDE data
        QgsMessageLog.logMessage(' ==> GET DEM ... ', level=QgsMessageLog.INFO)
        [
            a2dVarData_DEM, a1oVarHeader, a2dVarData_GEOX, a2dVarData_GEOY,
            dVarGeoXStep, dVarGeoYStep, dVarNoData
        ] = readRasterFile(self.sFileName)
        a2dVarData_DEM_F = adjustVarPy2F(deepcopy(a2dVarData_DEM), 'float32')
        QgsMessageLog.logMessage(' ==> GET DEM ... DONE!',
                                 level=QgsMessageLog.INFO)
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Compute MASK data
        QgsMessageLog.logMessage(' ==> COMPUTE MASK ... ',
                                 level=QgsMessageLog.INFO)
        a2iVarData_MASK = computeMaskDomain(a2dVarData_DEM, dVarNoData)
        QgsMessageLog.logMessage(' ==> COMPUTE MASK ... DONE!',
                                 level=QgsMessageLog.INFO)
        # Info progress algorithm
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Compute CELL_AREA data
        QgsMessageLog.logMessage(' ==> COMPUTE CELLAREA ... ',
                                 level=QgsMessageLog.INFO)
        a2dVarData_CAREA = computeCellArea(a2dVarData_GEOY, dVarGeoXStep,
                                           dVarGeoYStep)
        dVarAvg_CAREA = sqrt(nanmean(a2dVarData_CAREA))
        a2dVarData_CAREA_F = adjustVarPy2F(deepcopy(a2dVarData_CAREA),
                                           'float32')
        QgsMessageLog.logMessage(' ==> COMPUTE CELLAREA ... DONE!',
                                 level=QgsMessageLog.INFO)
        # Info progress algorithm
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Compute FLOW_DIRECTIONS data
        QgsMessageLog.logMessage(' ==> COMPUTE FLOW DIRECTIONS ... ',
                                 level=QgsMessageLog.INFO)

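        # Each terrain step below follows the same pattern: a Manager dict is shared
        # with a worker Process running the routine (apparently Fortran-backed, given
        # the Py2F/F2Py adjustments), the worker stores its result arrays under known
        # keys (e.g. 'PNT', 'DAREA'), and join() blocks until the computation finishes,
        # presumably to keep the heavy work out of the QGIS plugin process itself.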
        oProcManager = Manager()
        oProcDict = oProcManager.dict()
        self.oProcExc = Process(target=computeFlowDirections,
                                args=(a2dVarData_DEM_F, oProcDict))
        self.oProcExc.start()
        self.oProcExc.join()

        a2iVarData_PNT_F = oProcDict['PNT']
        a2iVarData_PNT = adjustVarF2Py(a2iVarData_PNT_F, 'int32')

        QgsMessageLog.logMessage(' ==> COMPUTE FLOW DIRECTIONS ... DONE!',
                                 level=QgsMessageLog.INFO)
        # Info progress algorithm
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Compute DRAINAGE AREA data
        QgsMessageLog.logMessage(' ==> COMPUTE DRAINAGE AREA ... ',
                                 level=QgsMessageLog.INFO)

        oProcManager = Manager()
        oProcDict = oProcManager.dict()
        self.oProcExc = Process(target=computeDrainageArea,
                                args=(a2dVarData_DEM_F, a2iVarData_PNT_F,
                                      oProcDict))
        self.oProcExc.start()
        self.oProcExc.join()

        a2iVarData_DAREA_F = oProcDict['DAREA']

        a2iVarData_DAREA = adjustVarF2Py(a2iVarData_DAREA_F, 'int32')
        QgsMessageLog.logMessage(' ==> COMPUTE DRAINAGE AREA ... DONE!',
                                 level=QgsMessageLog.INFO)
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Compute CHANNELS NETWORK and PARTIAL DISTANCE
        QgsMessageLog.logMessage(
            ' ==> COMPUTE CHANNELS NETWORK AND PARTIAL DISTANCE ... ',
            level=QgsMessageLog.INFO)

        oProcManager = Manager()
        oProcDict = oProcManager.dict()
        self.oProcExc = Process(target=computeChannelsNetwork,
                                args=(a2dVarData_DEM_F, a2iVarData_PNT_F,
                                      a2iVarData_DAREA_F, dVarAvg_CAREA,
                                      self.dThrAsk, oProcDict))
        self.oProcExc.start()
        self.oProcExc.join()

        a2iVarData_CNET_F = oProcDict['CNET']
        a2dVarData_PDIST_F = oProcDict['PDIST']

        a2iVarData_CNET = adjustVarF2Py(a2iVarData_CNET_F, 'int32')
        a2dVarData_PDIST = adjustVarF2Py(a2dVarData_PDIST_F, 'float32')
        QgsMessageLog.logMessage(
            ' ==> COMPUTE CHANNELS NETWORK AND PARTIAL DISTANCE ... DONE!',
            level=QgsMessageLog.INFO)
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Compute ALPHA and BETA data
        QgsMessageLog.logMessage(' ==> COMPUTE ALPHA AND BETA ... ',
                                 level=QgsMessageLog.INFO)

        oProcManager = Manager()
        oProcDict = oProcManager.dict()
        self.oProcExc = Process(target=computeWatertableSlopes,
                                args=(a2dVarData_DEM_F, a2iVarData_PNT_F,
                                      a2iVarData_CNET_F, a2iVarData_DAREA_F,
                                      self.dThrWts, oProcDict))
        self.oProcExc.start()
        self.oProcExc.join()

        a2dVarData_ALPHA_F = oProcDict['ALPHA']
        a2dVarData_BETA_F = oProcDict['BETA']

        a2dVarData_ALPHA = adjustVarF2Py(a2dVarData_ALPHA_F, 'float64')
        a2dVarData_BETA = adjustVarF2Py(a2dVarData_BETA_F, 'float64')
        QgsMessageLog.logMessage(' ==> COMPUTE ALPHA AND BETA ... DONE!',
                                 level=QgsMessageLog.INFO)
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Compute COEFF RESOLUTION data
        QgsMessageLog.logMessage(' ==> COMPUTE COEFF RESOLUTION ... ',
                                 level=QgsMessageLog.INFO)

        oProcManager = Manager()
        oProcDict = oProcManager.dict()
        self.oProcExc = Process(target=computeCoeffResolution,
                                args=(a2dVarData_DEM_F, a2iVarData_DAREA_F,
                                      a2iVarData_CNET_F, a2dVarData_CAREA_F,
                                      self.dThrCRes, oProcDict))
        self.oProcExc.start()
        self.oProcExc.join()

        a2dVarData_CRES_F = oProcDict['CRES']

        a2dVarData_CRES = adjustVarF2Py(a2dVarData_CRES_F, 'float32')
        QgsMessageLog.logMessage(' ==> COMPUTE COEFF RESOLUTION ... DONE!',
                                 level=QgsMessageLog.INFO)
        self.progress()
        # -------------------------------------------------------------------------------------

        # -------------------------------------------------------------------------------------
        # Update variable(s) in a common workspace
        oDictVarName = {
            'Var_01': sVarName_DEM,
            'Var_02': sVarName_GEOX,
            'Var_03': sVarName_GEOY,
            'Var_04': sVarName_MASK,
            'Var_05': sVarName_CAREA,
            'Var_06': sVarName_PNT,
            'Var_07': sVarName_DAREA,
            'Var_08': sVarName_CNET,
            'Var_09': sVarName_PDIST,
            'Var_10': sVarName_ALPHA,
            'Var_11': sVarName_BETA,
            'Var_12': sVarName_CRES,
        }
        oDictVarValue = {
            'Var_01': a2dVarData_DEM,
            'Var_02': a2dVarData_GEOX,
            'Var_03': a2dVarData_GEOY,
            'Var_04': a2iVarData_MASK,
            'Var_05': a2dVarData_CAREA,
            'Var_06': a2iVarData_PNT,
            'Var_07': a2iVarData_DAREA,
            'Var_08': a2iVarData_CNET,
            'Var_09': a2dVarData_PDIST,
            'Var_10': a2dVarData_ALPHA,
            'Var_11': a2dVarData_BETA,
            'Var_12': a2dVarData_CRES
        }
        # Update variable(s) information
        self.oVarsDict = updateVarsInfo(self.oVarsDict, oDictVarName,
                                        oDictVarValue, 'DataValue')
        self.oVarsHeader = a1oVarHeader
Example #57
0
from warnings import simplefilter
from functools import partial
from cryptoalgotrading.riskmanagement import RiskManagement
from cryptoalgotrading.riskmanagement import Binance as Bnb
from multiprocessing import Pool, Manager
from cryptoalgotrading.aux import get_markets_list, \
                Bittrex, stop_loss, trailing_stop_loss, \
                timeit, safe, connect_db, get_markets_on_files, \
                manage_files, num_processors, plot_data, \
                check_market_name, get_data_from_file, \
                time_to_index, get_historical_data, \
                binance2btrx
import logging as log
import sys

manager = Manager()
cached = manager.dict()
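# Module-level Manager dict: entries written by pooled worker processes stay
# visible to the parent process (presumably used to memoize market data).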


def signal_handler(sig, frame):
    log.info(f"You pressed Ctrl+C!")
    sys.exit(0)


# Prevents FutureWarning from Pandas.
simplefilter(action='ignore', category=FutureWarning)


@safe
def is_time_to_exit(data,
                    funcs,
                    smas=var.default_smas,
Example #58
0
    def execute(self):
        #import the algorithm module
        try:
            importStr = 'from algorithm.rating.' + self.config[
                'recommender'] + ' import ' + self.config['recommender']
            exec(importStr)
        except ImportError:
            importStr = 'from algorithm.ranking.' + self.config[
                'recommender'] + ' import ' + self.config['recommender']
            exec(importStr)
        if self.evaluation.contains('-cv'):
            k = int(self.evaluation['-cv'])
            if k <= 1 or k > 10:
                k = 3
            mkl.set_num_threads(max(1, mkl.get_max_threads() / k))
            # create the manager used for communication between processes
            manager = Manager()
            m = manager.dict()
            i = 1
            tasks = []

            binarized = False
            if self.evaluation.contains('-b'):
                binarized = True

            for train, test in DataSplit.crossValidation(self.trainingData,
                                                         k,
                                                         binarized=binarized):
                fold = '[' + str(i) + ']'
                if self.config.contains('social'):
                    recommender = self.config[
                        'recommender'] + "(self.config,train,test,self.relation,fold)"
                else:
                    recommender = self.config[
                        'recommender'] + "(self.config,train,test,fold)"
                # create the process
                p = Process(target=run, args=(m, eval(recommender), i))
                tasks.append(p)
                i += 1
            #start the processes
            for p in tasks:
                p.start()
            #wait until all processes are completed
            for p in tasks:
                p.join()
            #compute the mean error of k-fold cross validation
            self.measure = [dict(m)[i] for i in range(1, k + 1)]
            res = []
            for i in range(len(self.measure[0])):
                measure = self.measure[0][i].split(':')[0]
                total = 0
                for j in range(k):
                    total += float(self.measure[j][i].split(':')[1])
                res.append(measure + ':' + str(total / k) + '\n')
            #output result
            currentTime = strftime("%Y-%m-%d %H-%M-%S", localtime(time()))
            outDir = LineConfig(self.config['output.setup'])['-dir']
            fileName = self.config[
                'recommender'] + '@' + currentTime + '-' + str(
                    k) + '-fold-cv' + '.txt'
            FileIO.writeFile(outDir, fileName, res)
            print 'The result of %d-fold cross validation:\n%s' % (
                k, ''.join(res))

        else:
            if self.config.contains('social'):
                recommender = self.config[
                    'recommender'] + '(self.config,self.trainingData,self.testData,self.relation)'
            else:
                recommender = self.config[
                    'recommender'] + '(self.config,self.trainingData,self.testData)'
            eval(recommender).execute()
Example #59
0
 def __init__(self):
     manager = Manager()
     self.non_finding = manager.dict()
     self.crash = manager.dict()
     self.timeout = manager.dict()
     self.kasan = manager.dict()
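     # Manager-backed dicts shared with worker processes so findings can be
     # recorded by category (non-findings, crashes, timeouts, KASAN reports);
     # presumably each worker writes results here for the parent to aggregate.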
Example #60
0
def kicost(in_file,
           eda_tool_name,
           out_filename,
           user_fields,
           ignore_fields,
           group_fields,
           variant,
           dist_list=list(distributor_dict.keys()),
           num_processes=4,
           scrape_retries=5,
           throttling_delay=0.0,
           collapse_refs=True,
           local_currency='USD'):
    ''' @brief Run KiCost.
    
    Take a schematic input file and create an output file with a cost spreadsheet in xlsx format.
    
    @param in_file `list(str())` List of the names of the input BOM files.
    @param eda_tool_name `list(str())` of the EDA modules to be used to open the `in_file` list.
    @param out_filename `str()` XLSX output file name.
    @param user_fields `list()` of the user fields to be included on the spreadsheet global part.
    @param ignore_fields `list()` of the fields to be ignored on the read EDA modules.
    @param group_fields `list()` of the fields to be grouped/merged on the function group parts that
    are not grouped by default.
    @param variant `list(str())` of regular expressions selecting the BOM variant of each file in `in_file`.
    @param dist_list `list(str())` of distributors to be scraped; if empty, all distributor
    modules are scraped. If `None`, no web/local distributors will be scraped.
    @param num_processes `int()` Number of parallel processes used for web scraping part data. Use
    1 for serial mode.
    @param scrape_retries `int()` Number of attempts to retrieve part data from a website.
    @param throttling_delay `float()` Minimum delay (in seconds) between successive accesses to a
    distributor's website.
    @param collapse_refs `bool()` Whether to collapse the designator references in the spreadsheet.
    Default `True`.
    @param local_currency `str()` Local/country in ISO3166:2 and currency in ISO4217. Default 'USD'.
    '''

    # Only keep distributors in the included list and not in the excluded list.
    if dist_list is not None:
        if not dist_list:
            dist_list = list(distributor_dict.keys())
        if 'local_template' not in dist_list:
            # Needed later for creating non-web distributors.
            dist_list += ['local_template']
        for d in list(distributor_dict.keys()):
            if d not in dist_list:
                distributor_dict.pop(d, None)
    else:
        for d in list(distributor_dict.keys()):
            distributor_dict.pop(d, None)

    # Handle the corner cases where only one EDA tool or variant is given
    # for multiple BOM input files.
    if not isinstance(in_file, list):
        in_file = [in_file]
    if not isinstance(variant, list):
        variant = [variant] * len(in_file)
    elif len(variant) != len(in_file):
        variant = [variant[0]] * len(in_file)  #Assume the first as default.
    if not isinstance(eda_tool_name, list):
        eda_tool_name = [eda_tool_name] * len(in_file)
    elif len(eda_tool_name) != len(in_file):
        eda_tool_name = [eda_tool_name[0]] * len(
            in_file)  #Assume the first as default.

    # Get groups of identical parts.
    parts = dict()
    prj_info = list()
    for i_prj in range(len(in_file)):
        eda_tool_module = eda_modules[eda_tool_name[i_prj]]
        p, info = eda_tool_module.get_part_groups(in_file[i_prj],
                                                  ignore_fields,
                                                  variant[i_prj])
        p = subpartqty_split(p)
        # In the case of multiple BOM files, add the project prefix identifier
        # to each reference/designator. The 'manf#_qty' field becomes a `list()`
        # with one entry per BOM file so the quantity going to each project can
        # be tracked. This vector is used later in `group_parts()` to create
        # groups with elements of the same 'manf#' that came from different
        # projects.
        if len(in_file) > 1:
            logger.log(
                DEBUG_OVERVIEW,
                'Multi BOMs detected, attaching project identification to references...'
            )
            qty_base = ['0'] * len(in_file)  # Base zero quantity vector.
            for p_ref in list(p.keys()):
                try:
                    qty_base[i_prj] = p[p_ref]['manf#_qty']
                except:
                    qty_base[i_prj] = '1'
                p[p_ref]['manf#_qty'] = qty_base.copy()
                p['prj' + str(i_prj) + SEPRTR + p_ref] = p.pop(p_ref)
        parts.update(p.copy())
        prj_info.append(info.copy())

    # Group the parts outside the EDA modules so different project lists can be
    # merged; fields listed in `group_fields` are ignored during the merge.
    FIELDS_SPREADSHEET = [
        'refs', 'value', 'desc', 'footprint', 'manf', 'manf#'
    ]
    FIELDS_MANFCAT = ([d + '#' for d in distributor_dict] + ['manf#'])
    FIELDS_MANFQTY = ([d + '#_qty' for d in distributor_dict] + ['manf#_qty'])
    FIELDS_IGNORE = FIELDS_SPREADSHEET + FIELDS_MANFCAT + FIELDS_MANFQTY + user_fields + [
        'pricing'
    ]
    for ref, fields in list(parts.items()):
        for f in fields:
            # Merge all extra fields read from the files that will not be
            # displayed (needed to check `user_fields`).
            if f not in FIELDS_IGNORE and SEPRTR not in f and f not in group_fields:  # Skip repeated field names and fields containing the separator `:` defined by `SEPRTR`.
                group_fields += [f]
    # Fields to be merged for specific EDA tools are listed below.
    if 'kicad' in eda_tool_name:
        # This field may be a mess on multiple sheet designs.
        group_fields += ['libpart']
    if len(set(eda_tool_name)) > 2:
        # If more than one EDA software was used, ignore the 'footprint'
        # field, because they could have different libraries names.
        group_fields += ['footprint']
    # Always ignore the 'desc' ('description') and 'var' ('variant') fields
    # when merging the components into groups.
    group_fields += ['desc', 'var']
    group_fields = set(group_fields)
    parts = group_parts(parts, group_fields)

    # If the parts have no manufacturer code 'manf#' and only distributor codes,
    # check whether any distributor requested for scraping has no code in any
    # part and, if so, exclude it from the scrape list. This reduces the number
    # of warning messages emitted during the process.
    all_fields = []
    for p in parts:
        all_fields += list(p.fields.keys())
    all_fields = set(all_fields)
    if 'manf#' not in all_fields:
        dist_not_rmv = [d for d in distributor_dict.keys() if d + '#' in all_fields]
        # Needed later for creating non-web distributors.
        dist_not_rmv += ['local_template']
        #distributor_scrap = {d:distributor_dict[d] for d in dist_not_rmv}
        distributors = distributor_dict.copy().keys()
        for d in distributors:
            if d not in dist_not_rmv:
                logger.warning(
                    "No 'manf#' and '%s#' field in any part: distributor '%s' will not be scraped.",
                    d, distributor_dict[d]['label'])
                distributor_dict.pop(d, None)

    # Create an HTML page containing all the local part information.
    local_part_html = create_local_part_html(parts, distributor_dict)

    if logger.isEnabledFor(DEBUG_DETAILED):
        pprint.pprint(distributor_dict)

    # Get the distributor product page for each part and scrape the part data.
    if dist_list:

        if local_currency:
            logger.log(
                DEBUG_OVERVIEW,
                '# Configuring the distributors locale and currency...')
            if num_processes <= 1:
                for d in distributor_dict:
                    config_distributor(distributor_dict[d]['module'],
                                       local_currency)
            else:
                logger.log(
                    DEBUG_OBSESSIVE, 'Using {} simultaneous access...'.format(
                        min(len(distributor_dict), num_processes)))
                pool = Pool(num_processes)
                for d in distributor_dict:
                    args = [distributor_dict[d]['module'], local_currency]
                    pool.apply_async(config_distributor, args)
                pool.close()
                pool.join()

        logger.log(DEBUG_OVERVIEW,
                   '# Scraping part data for each component group...')
        # Set the throttling delay for each distributor.
        for d in distributor_dict:
            distributor_dict[d]['throttling_delay'] = throttling_delay

        global scraping_progress
        scraping_progress = tqdm.tqdm(desc='Progress',
                                      total=len(parts),
                                      unit='part',
                                      miniters=1)

        # Route logging output through `tqdm` so the progress bar stays at the bottom of the terminal.
        class TqdmLoggingHandler(logging.Handler):
            '''Write logging records through `tqdm` so they do not break the progress bar.'''
            def __init__(self, level=logging.NOTSET):
                super(self.__class__, self).__init__(level)

            def emit(self, record):
                try:
                    msg = self.format(record)
                    tqdm.tqdm.write(msg)
                    self.flush()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    self.handleError(record)

        logger.addHandler(TqdmLoggingHandler())

        if num_processes <= 1:
            # Scrape data, one part at a time using single processing.

            class DummyLock:
                """Dummy synchronization lock used when single processing."""
                def __init__(self):
                    pass

                def acquire(*args, **kwargs):
                    return True  # Lock can ALWAYS be acquired when just one process is running.

                def release(*args, **kwargs):
                    pass

            # Create sync lock and timeouts to control the rate at which distributor
            # websites are scraped.
            throttle_lock = DummyLock()
            throttle_timeouts = dict()
            throttle_timeouts = {d: time() for d in distributor_dict}

            for i in range(len(parts)):
                args = (i, parts[i], distributor_dict, local_part_html,
                        scrape_retries, logger.getEffectiveLevel(),
                        throttle_lock, throttle_timeouts)
                id, url, part_num, price_tiers, qty_avail, info_dist = scrape_part(
                    args)
                parts[id].part_num = part_num
                parts[id].url = url
                parts[id].price_tiers = price_tiers
                parts[id].qty_avail = qty_avail
                parts[id].info_dist = info_dist  # Extra distributor web page.
                scraping_progress.update(1)
        else:
            # Scrape data, multiple parts at a time using multiprocessing.

            # Create sync lock and timeouts to control the rate at which distributor
            # websites are scraped.
            throttle_manager = Manager()  # Manages shared lock and `dict`.
            throttle_lock = throttle_manager.Lock()
            throttle_timeouts = throttle_manager.dict()
            for d in distributor_dict:
                throttle_timeouts[d] = time()

            # Create pool of processes to scrape data for multiple parts simultaneously.
            pool = Pool(num_processes)

            # Package part data for passing to each process.
            arg_sets = [(i, parts[i], distributor_dict, local_part_html,
                         scrape_retries, logger.getEffectiveLevel(),
                         throttle_lock, throttle_timeouts)
                        for i in range(len(parts))]

            # Define a callback routine for updating the scraping progress bar.
            def update(x):
                scraping_progress.update(1)
                return x

            # Start the web scraping processes, one for each part.
            logger.log(
                DEBUG_OBSESSIVE,
                'Starting {} parallel processes to scrape parts...'.format(
                    num_processes))
            results = [
                pool.apply_async(scrape_part, [args], callback=update)
                for args in arg_sets
            ]

            # Wait for all the processes to have results, then kill-off all the scraping processes.
            for r in results:
                while (not r.ready()):
                    pass
            logger.log(DEBUG_OVERVIEW,
                       'All parallel processes finished successfully.')
            pool.close()
            pool.join()

            # Get the data from each process result structure.
            for result in results:
                id, url, part_num, price_tiers, qty_avail, info_dist = result.get(
                )
                parts[id].part_num = part_num
                parts[id].url = url
                parts[id].price_tiers = price_tiers
                parts[id].qty_avail = qty_avail
                parts[id].info_dist = info_dist  # Extra distributor web page.

        # Done with the scraping progress bar so delete it or else we get an
        # error when the program terminates.
        logger.removeHandler(
            TqdmLoggingHandler())  # Return the print channel of the logging.
        del scraping_progress

    # Create the part pricing spreadsheet.
    create_spreadsheet(parts, prj_info, out_filename, collapse_refs,
                       user_fields,
                       '-'.join(variant) if len(variant) > 1 else variant[0])

    # Print component groups for debugging purposes.
    if logger.isEnabledFor(DEBUG_DETAILED):
        for part in parts:
            for f in dir(part):
                if f.startswith('__'):
                    continue
                elif f.startswith('html_trees'):
                    continue
                else:
                    print('{} = '.format(f), end=' ')
                    try:
                        pprint.pprint(part.__dict__[f])
                    except TypeError:
                        # Python 2.7 pprint has some problem ordering None and strings.
                        print(part.__dict__[f])
                    except KeyError:
                        pass
            print()
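
A minimal invocation sketch follows; it is not from the original project, and the BOM file name, output name and variant value are placeholder assumptions.

# Hypothetical call: one KiCad XML BOM, default distributor list, 4 scraping processes.
if __name__ == '__main__':
    kicost(in_file=['my_board.xml'],
           eda_tool_name=['kicad'],
           out_filename='my_board_cost.xlsx',
           user_fields=[],
           ignore_fields=[],
           group_fields=[],
           variant=[''],      # placeholder variant regex
           num_processes=4)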