def update_nuclear_from_ceps(self):
     """Merge recent CEPS nuclear ('JE') readings into mn.saved_data, then persist.

     Python 2 code: relies on dict.iteritems() and indexable .keys()/.values().
     NOTE(review): assumes self.ceps_data rows carry 'type_secondary',
     'start' (datetime) and 'quantity' keys -- confirm against the loader.
     """
     mn.parse_arguments()
     # Keep only nuclear rows newer than the configured look-back window.
     recent_nuclear = [row for row in self.ceps_data
                       if row['type_secondary'] == 'JE'
                       and row['start'] > datetime.now() - timedelta(days=mn.get_argv().days_back)]
     for ceps in recent_nuclear:
         day_key = ceps['start'].strftime('%Y-%m-%d')
         hour = ceps['start'].hour
         # Already-saved record(s) for the same calendar day, if any.
         matches = {k: v for k, v in mn.saved_data.iteritems() if v['date'] == day_key}
         if matches:
             # Update the existing day record in place: one value per hour.
             record = matches.values()[0]
             record['content'][hour] = ceps['quantity']
             mn.saved_data[matches.keys()[0]] = record
         else:
             # No record for this day yet -- create a fresh one, keyed by the
             # current size of the store.
             mn.saved_data[len(mn.saved_data)] = {
                 'date': day_key,
                 'country': self.ml_object.country,
                 'data_type_secondary': 'nuclear',
                 'data_type': 'generation',
                 'content': {hour: ceps['quantity']},
                 'approximation': [],
             }
     mn.save_all_data({'country': self.ml_object.country,
                       'type': 'generation', 'type_secondary': 'nuclear'})
 def download_generation(self):
     params = {}
     mn.parse_arguments()
     params['type'] = 'generation'
     for country in dp.get_all_generation_countries():
         print 'Begin country: ' + country
         params['country'] = country
         # ml = ML.ML(country)
         # self.data = ml.data
         # self.approx_data = ml.approximated_data
         # self.all_sources = ml.all_sources
         for gen_type in dp.get_all_generation_types():
             print ' == Begin type: ' + dp.psr_types[gen_type]
             params['type_secondary'] = dp.psr_types[gen_type]
             params['start'] = datetime.now().replace(
                 microsecond=0, second=0, minute=0,
                 hour=0) - timedelta(days=mn.get_argv().days_back)
             params['end'] = datetime.now().replace(
                 microsecond=0, second=0,
                 minute=0)  # params['start'] + timedelta(days=2)
             self.__get_all_dates_for_params(params)
         # self.__append_aproximation_data(mn.saved_data)
         # ml.close_database_connection()
         mn.save_all_data(params)
def main():
    # For each 500 bp rotation of each FASTA record, estimate an "origin"
    # position (via wavelet transform or multiscaling, chosen by --wavelet)
    # and write tab-separated offset/distance pairs to rotate.txt.
    # Python 2 script (print statements).
    # NOTE(review): rotate.txt is reopened with mode "w" per record, so a
    # multi-record FASTA overwrites earlier output -- matches the existing
    # "not fully tested with multiple records" caveat below.
    start_time = time.time()

    options = master.parse_arguments()
    use_wavelet = options.wavelet
    in_fasta_handle = open(options.in_fasta_filename, "rU")

    # script not fully tested with multiple records
    for record in SeqIO.parse(in_fasta_handle, "fasta"):
        record_id = record.id
        sequence = str(record.seq)

        rotate_handle = open("rotate.txt", "w")

        for i in range(0, len(sequence), 500):
            # Rotate the sequence by i so each pass starts at a new offset.
            wrap_sequence = sequence[i:] + sequence[:i]

            # Two possible methods: wavelet transforms or multiscaling
            if use_wavelet:
                wavelet = master.WaveletTransform(options)
                details_filename = wavelet.dwt(wrap_sequence)
                wavelet_data = wavelet.get_data_from_R(details_filename)
                origin_scaled = wavelet.get_origin(wavelet_data)
                # get_origin returns a fraction of the length; rescale.
                origin = int(origin_scaled * len(sequence))
            else:
                multiscale = master.Multiscaling(options)
                origin = multiscale.multiscaling(wrap_sequence,
                                                 in_fasta_handle, record_id)

            # Undo the rotation and wrap back into [0, len(sequence)).
            origin += i
            while origin >= len(sequence):
                origin -= len(sequence)
            # Distance from the sequence start, whichever wrap direction
            # is shorter.
            distance = min(origin, len(sequence) - origin)
            print "i:", i, ", origin:", origin, ", distance:", distance
            rotate_handle.write(str(i) + "\t" + str(distance) + "\n")

        rotate_handle.close()
        #rearrange_fasta(origin, record_id, sequence, options)

    in_fasta_handle.close()

    print "Runtime:", time.time() - start_time, "seconds"
def main():
 	start_time = time.time()

	options = master.parse_arguments()
	use_wavelet = options.wavelet
	in_fasta_handle = open(options.in_fasta_filename, "rU")
	
	# script not fully tested with multiple records
	for record in SeqIO.parse(in_fasta_handle, "fasta"):
		record_id = record.id
		sequence = str(record.seq)

		rotate_handle = open("rotate.txt", "w")	

		for i in range(0, len(sequence), 500):
			wrap_sequence = sequence[i:] + sequence[:i]

			# Two possible methods: wavelet transforms or multiscaling
			if use_wavelet:
				wavelet = master.WaveletTransform(options)
				details_filename = wavelet.dwt(wrap_sequence)
				wavelet_data = wavelet.get_data_from_R(details_filename)
				origin_scaled = wavelet.get_origin(wavelet_data)
				origin = int(origin_scaled * len(sequence))
			else:
				multiscale = master.Multiscaling(options)
				origin = multiscale.multiscaling(wrap_sequence, in_fasta_handle, record_id)

			origin += i
			while origin >= len(sequence):
				origin -= len(sequence)
			distance = min(origin, len(sequence) - origin)
			print "i:", i, ", origin:", origin, ", distance:", distance
			rotate_handle.write(str(i) + "\t" + str(distance) + "\n")

		rotate_handle.close()
		#rearrange_fasta(origin, record_id, sequence, options)

	in_fasta_handle.close()

	print "Runtime:", time.time() - start_time, "seconds"
Beispiel #5
0
        super(GatingMechanism, self).__init__()
        self.params = params
        with open(self.params.entity_tran, 'rb') as f:
            transE_embedding = pkl.load(f)
        self.enti_tran = nn.Embedding.from_pretrained(
            torch.from_numpy(transE_embedding).float())
        entity_num = transE_embedding.shape[0]
        # gating 的参数

        self.gate_theta = Parameter(
            torch.empty(entity_num, self.params.hidden_dim))
        nn.init.xavier_uniform_(self.gate_theta)

        # self.dropout = nn.Dropout(self.params.dropout)

    def forward(self, X: torch.FloatTensor, Y: torch.LongTensor):
        """Gate the LSTM output against the pretrained entity embeddings.

        :param X: LSTM output tensor, shape |E| x H
        :param Y: entity index ids, shape |E|
        :return: gated combination, shape |E| x H
        """
        # Per-entity gate in (0, 1), selected by entity id.
        gate = torch.sigmoid(self.gate_theta[Y])
        # Look up the pretrained (TransE) embedding for each entity id.
        embedded = self.enti_tran(Y)
        # Convex combination: gate * X + (1 - gate) * embedding.
        return gate * X + (1.0 - gate) * embedded


if __name__ == '__main__':
    # Smoke test: build a GatingMechanism from the command-line arguments.
    from main import parse_arguments
    GatingMechanism(parse_arguments())
Beispiel #6
0
                    raise ValueError("Unknown network")

                ground_truth = os.path.join(WORKING_DIR, "datasets",
                                            "network_%s.txt" % network)
                measure = compute_scores(ground_truth, fname, parameters)
                row = deepcopy(parameters)
                row.update(measure)
                pprint(row)
                results.append(row)

        else:
            n_jobs_launched += 1
            cmd_parameters = " ".join("--%s %s" % (key, parameters[key])
                                      for key in sorted(parameters))

            scripts_args = parse_arguments(shlex.split(cmd_parameters))
            if make_hash(scripts_args) != job_hash:
                pprint(scripts_args)
                pprint(parameters)
                raise ValueError("hash are not equal, all parameters are "
                                 "not specified.")

            cmd = submit(job_command="%s main.py %s" %
                         (sys.executable, cmd_parameters),
                         job_name=job_hash,
                         time="100:00:00",
                         memory=24000,
                         log_directory=LOG_DIRECTORY,
                         backend="slurm")

            if not args["debug"]:
Beispiel #7
0
                ground_truth = os.path.join(WORKING_DIR, "datasets",
                                            "network_%s.txt" % network)
                measure = compute_scores(ground_truth, fname, parameters)
                row = deepcopy(parameters)
                row.update(measure)
                pprint(row)
                results.append(row)


        else:
            n_jobs_launched += 1
            cmd_parameters = " ".join("--%s %s" % (key, parameters[key])
                                      for key in sorted(parameters))

            scripts_args = parse_arguments(shlex.split(cmd_parameters))
            if make_hash(scripts_args) != job_hash:
                pprint(scripts_args)
                pprint(parameters)
                raise ValueError("hash are not equal, all parameters are "
                                 "not specified.")

            cmd = submit(job_command="%s main.py %s"
                                     % (sys.executable, cmd_parameters),
                         job_name=job_hash,
                         time="100:00:00",
                         memory=24000,
                         log_directory=LOG_DIRECTORY,
                         backend="slurm")

            if not args["debug"]:
Beispiel #8
0
    fig_name = None
    if not argv.plot_special_n3 and not argv.plot_special_n4:
        fig, ax = plt.subplots()

        if len(Ns) == 1:
            Ns = list(range(1, len(Links) + 1))
            xlabel = 'Number of Links with channel assigned'
            ylabel = 'Frictional Network Interference'
            fig_name = "base_fni_nlink.png"
        else:
            xlabel = 'Number of Nodes'
            ylabel = 'Frictional Network Interference'
            fig_name = "base_fni_n.png"
        ax.plot(Ns, fni_list)

        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        # ax.set_yscale('log')
        fig_path = os.path.join(argv.fig_root, fig_name)

        print(f'Saving to {fig_path}')
        plt.savefig(fig_path, format='png', bbox_inches='tight')
        print()

    return Ns, fni_list, xlabel, ylabel, fig_name


if __name__ == '__main__':
    # NOTE(review): "sepcial" in the argument string looks like a typo for
    # "special" (cf. the plot_special_n3/plot_special_n4 flags above) --
    # confirm against main.parse_arguments before changing this runtime string.
    argv = main.parse_arguments("--plot sepcial n4 --use base".split())
    test_base_method(argv)
Beispiel #9
0
    from main import CONSOLE_ARGUMENTS
    im_directory = CONSOLE_ARGUMENTS.im_directory
    mask_directory = CONSOLE_ARGUMENTS.mask_directory
    gt_directory = CONSOLE_ARGUMENTS.gt_directory
    files_to_process = CONSOLE_ARGUMENTS.numFiles
    tm = CONSOLE_ARGUMENTS.tm

    signals_list = calculateImagesMetrics(im_directory,
                                          mask_directory,
                                          gt_directory,
                                          files_to_process=files_to_process)
    signal_type_dict = create_signal_type_dict(signals_list)
    if (tm): print_results_signal_type_dict(signal_type_dict)

    return signal_type_dict


def test_metrics():
    """Print the parsed console arguments, then build and return the metrics dictionary."""
    from main import CONSOLE_ARGUMENTS as console_args
    print(console_args)
    # numFiles is read but its value is not used further in this function.
    _num_files = console_args.numFiles
    return get_dictionary()


if __name__ == '__main__':
    # read arguments
    # parse_arguments() is called for its side effects -- presumably it
    # populates the module-level CONSOLE_ARGUMENTS that test_metrics reads;
    # verify against main.py.
    from main import parse_arguments
    parse_arguments()
    test_metrics()
Beispiel #10
0
def test_gui():
    """Exercise main.parse_arguments() as if 'gui' had been passed on the CLI."""
    # Fake the command line seen by the argument parser.
    sys.argv = ['main.py', 'gui']
    main.parse_arguments()
	if not argv.plot_special_n3 and not argv.plot_special_n4:
		fig, ax = plt.subplots()

		if len(Ns) == 1:
			Ns = list(range(1, len(Links) + 1))
			xlabel = 'Number of Links with channel assigned'
			ylabel = 'Frictional Network Interference'
			fig_name = "fni_nlink.png"
		else:
			xlabel = 'Number of Nodes'
			ylabel = 'Frictional Network Interference'
			fig_name = "fni_n.png"
		ax.plot(Ns, fni_list)

		ax.set_xlabel(xlabel)
		ax.set_ylabel(ylabel)
		# ax.set_yscale('log')
		fig_path = os.path.join(argv.fig_root, fig_name)

		print(f'Saving to {fig_path}')
		plt.savefig(fig_path, format='png', bbox_inches='tight')
		print()

	return Ns, fni_list, xlabel, ylabel, fig_name



if __name__ == '__main__':
	# Run the test with default (empty) command-line arguments.
	argv = main.parse_arguments([])
	test_our_method(argv)