Пример #1
0
    def start(self):
        """Run one full pass of the formula pipeline and return the repeat flag.

        Requests all user input, runs the original-formula pipeline
        (generate -> optimize -> block -> test), optionally runs the same
        pipeline for the "researched" formulas, then asks whether to repeat.
        """
        # Gather every piece of user-supplied configuration up front.
        self.input_controller.fault_tolerance_request()
        self.input_controller.initial_groups_request()
        self.input_controller.blocked_vector_request()

        # The number of tested faults is the size of the largest initial group.
        fault_count = len(max(self.input_controller.initial_groups, key=len))

        procs = Processors(self.input_controller.initial_groups)
        formula_gen = FormulasGenerator(procs,
                                        self.input_controller.fault_tolerance)
        group_selector = Selector(self.input_controller.blocked_vectors)
        pr_tester = PrTester(procs)
        vector_blocker = Blocker(self.input_controller.blocked_vectors,
                                 pr_tester, group_selector, procs)

        # --- Original formulas ---
        formula_gen.generate(self.input_controller.initial_groups,
                             self.original_formulas_file)
        original_opt = Optimizer(self.input_controller.fault_tolerance, procs,
                                 self.optimized_original_file)
        original_opt.optimize(formula_gen.origin_formulas_arr)
        blocked_original = vector_blocker.block_original(
            group_selector.select_group(original_opt.result_formulas))
        pr_tester.test(self.result_of_testing_original_file, blocked_original,
                       fault_count, self.input_controller.fault_tolerance)

        # --- Researched formulas (optional, user-driven) ---
        self.input_controller.researched_way_request()
        if self.input_controller.researched_way:
            formula_gen.generate(
                procs.form_groups_according_to_blocked_vector(
                    self.input_controller.blocked_vectors),
                self.researched_formulas_file)
            researched_opt = Optimizer(self.input_controller.fault_tolerance,
                                       procs, self.optimized_researched_file)
            researched_opt.optimize(formula_gen.origin_formulas_arr)
            blocked_researched = vector_blocker.block_researched(
                group_selector.select_group(researched_opt.result_formulas))
            pr_tester.test(self.result_of_testing_researched_file,
                           blocked_researched, fault_count,
                           self.input_controller.fault_tolerance)

        # Ask whether the whole procedure should run again.
        self.input_controller.repeat_request()
        return self.input_controller.repeat
Пример #2
0
def optimize():
    """Configure and run a parameter optimization of the RSI strategy."""
    opt = Optimizer()
    opt.strategy_class = RSIStrategy
    # Daily SPY bars; the intraday feeds below are kept for reference:
    #   DataFeedList(['20081210.SPY.1m.csv', ...], data_type='B')
    #   DataFeedList(['20081210.SPY.30s.csv', ...], data_type='I')
    opt.data_feed = DataFeedList(['SPY.csv'], data_type='D')

    # Population size, generation cap, output workbook, and convergence
    # tolerance.
    opt.size = 40
    opt.max_generations = 50
    opt.outfile = '%s_%s.xls' % (__file__[:-3],
                                 datetime.now().strftime("%Y%m%d"))
    opt.tolerance = 0.01
    #opt.reset_on_EOD = False

    # Search space:
    #   rsi      - RSI length
    #   top/btm  - RSI buy/sell thresholds
    #   average  - (optional) trend filter
    #   duration - trade duration
    parameter_specs = (
        dict(name='rsi', min_val=10, max_val=40, steps=16, converter=int),
        dict(name='top', min_val=60, max_val=80, steps=4, converter=int),
        dict(name='btm', min_val=20, max_val=40, steps=4, converter=int),
        dict(name='average', min_val=20, max_val=200, steps=64, converter=int),
        dict(name='duration', min_val=5, max_val=20, steps=8, converter=int),
    )
    for spec in parameter_specs:
        opt.add_parameter(spec)

    opt.run()
Пример #3
0
def main():
    """Run the baseline measurement followed by every optimization mode."""
    optim = Optimizer(PATCH_SIZE, RANGE_OF_ONES, GRANULARITY_TH, ACC_LOSS)
    optim.base_line_result()
    # Each mode is a no-argument method; run them in the original order.
    for run_mode in (optim.by_uniform_layers, optim.by_uniform_filters,
                     optim.by_uniform_patches, optim.by_max_granularity):
        run_mode()
Пример #4
0
    def __init__(self, inputs, input_length, targets, target_length,
                 max_target_length, num_layers, rnn_size, sample_prob,
                 keep_prob, learning_rate, num_labels, embedding_size,
                 batch_size, length_cost_prop, GO, EOS):
        """Build the decoder graph: embeddings, length prediction, decoding,
        combined loss, the training op, and a merged summary op.

        GO / EOS are special token ids stored on the instance; the decoding
        helpers presumably use them as start/end markers — TODO confirm.
        """
        self.GO = GO
        self.EOS = EOS
        self.batch_size = batch_size
        self.num_labels = num_labels

        with tf.name_scope("decoder") as scope:
            # Output-side token embedding table of width embedding_size.
            self.output_embeddings = self.generate_embeddings(embedding_size)
            self.length_predictions = self.length_detection(
                inputs, target_length)
            self.logits, self.predictions = self.decoding_layer(
                inputs, input_length, targets, target_length,
                max_target_length, num_layers, rnn_size, sample_prob,
                keep_prob)
            # Loss combines token logits with the length predictions;
            # length_cost_prop weights the length term.
            self.cost = self.calculate_loss(self.logits, targets,
                                            self.length_predictions,
                                            target_length, max_target_length,
                                            length_cost_prop)
            self.optimizer = Optimizer(learning_rate)
            self.train_op = self.optimizer.apply_gradients(self.cost)
        # Merge only summaries created inside the 'decoder' name scope.
        self.summary_op = tf.summary.merge(
            tf.get_collection(tf.GraphKeys.SUMMARIES, scope='decoder'))
Пример #5
0
def test_optimizer2():
    """Drive the optimizer over the retrace strategy until convergence."""
    opt = Optimizer()
    trial_id = 0

    # Population size and generation cap.
    opt.size = 40
    opt.max_generations = 5

    # outfile defaults to the strategy name when not set explicitly,
    # e.g. opt.outfile = 'retrace_optimized.xls'

    # Search space:
    #   momentum - entry momentum crossover
    #   average  - moving-average filter
    #   duration - trade holding period
    opt.add_parameters([
        dict(name='momentum', min_val=10, max_val=100, steps=32,
             converter=int),
        dict(name='average', min_val=20, max_val=200, steps=32, converter=int),
        dict(name='duration', min_val=10, max_val=50, steps=16, converter=int),
    ])

    # Score every candidate in each generated set, dumping progress per
    # generation, until the optimizer reports convergence.
    while not opt.converged():
        for candidate in opt.generate_set():
            log.debug('Testing: %s' % pprint.pformat(candidate))
            stats = test_retrace_strategy(candidate, trial_id)
            opt.score(stats, trial_id)
            trial_id += 1
        opt.dump()

    opt.write()
Пример #6
0
def Main(algorithm, problem, pop_size, crossover_probability,
         mutation_probability, n_partitions, n_gen, seed):
    """Run a multi-objective optimization and print the IGD metric."""
    # Resolve the problem instance by name.
    problem = Problems.get(problem)

    reference_directions = get_reference_directions("das-dennis",
                                                    problem.n_obj,
                                                    n_partitions=n_partitions)

    # Build the requested algorithm instance (None for unknown algorithms).
    if algorithm == Algorithms.NSGAII:
        algorithm = NSGA_II.Get_Algorithm_Instance(
            pop_size, crossover_probability, mutation_probability)
    elif algorithm == Algorithms.NSGAIII:
        algorithm = NSGA_III.Get_Algorithm_Instance(
            reference_directions, pop_size, crossover_probability,
            mutation_probability)
    else:
        algorithm = None

    # Run the optimization and collect the objective-space values.
    optimizer = Optimizer(problem, algorithm)
    optimization_result = optimizer.Minimize(n_gen, seed)
    objective_spaces_values = optimization_result.F

    # DTLZ1 needs the reference directions to construct its Pareto front.
    if type(problem).__name__ == "DTLZ1":
        pareto_front = problem.pareto_front(reference_directions)
    else:
        pareto_front = problem.pareto_front()

    # Inverted Generational Distance (IGD) performance indicator.
    IGD = get_performance_indicator("igd", pareto_front)
    #IGD_plus = get_performance_indicator("igd+", pareto_front)

    print("IGD:", IGD.calc(objective_spaces_values))
Пример #7
0
def test_optimizer3():
    """Drive the optimizer over the triple-momentum strategy until
    convergence."""
    opt = Optimizer()
    trial_id = 0

    # Population size, generation cap, and output workbook.
    opt.size = 40
    opt.max_generations = 5
    opt.outfile = 'triple_optimized.xls'

    # Search space:
    #   mo1      - short term momentum crossover
    #   mo2      - medium term momentum filter
    #   mo3      - long term momentum filter
    #   duration - trade holding period
    opt.add_parameters([
        dict(name='mo1', min_val=5, max_val=50, steps=32, converter=int),
        dict(name='mo2', min_val=60, max_val=100, steps=32, converter=int),
        dict(name='mo3', min_val=110, max_val=200, steps=32, converter=int),
        dict(name='duration', min_val=10, max_val=50, steps=16, converter=int),
    ])

    # Score every candidate in each generated set, dumping progress per
    # generation, until the optimizer reports convergence.
    while not opt.converged():
        for candidate in opt.generate_set():
            log.debug('Testing: %s' % pprint.pformat(candidate))
            stats = test_triple_strategy(candidate, trial_id)
            opt.score(stats, trial_id)
            trial_id += 1
        opt.dump()

    opt.write()
Пример #8
0
	def __init__(self, dataloader, hierarchical_transformer, config, i):
		"""Set up encoders, the (optionally multi-GPU) model, loss, and optimizer.

		dataloader: supplies the vocabulary (tweet_field.vocab) and batches.
		hierarchical_transformer: the model to be trained.
		config: run configuration (gpu flags, dimensions, Adam betas, ...).
		i: index of this training run/fold.
		"""
		super(Trainer, self).__init__()

		self.iter = i
		self.config = config
		self.cpu = torch.device("cpu")
		# More than one configured GPU index means data-parallel training.
		self.multi_gpu = len(self.config.gpu_idx) > 1

		self.dataloader = dataloader
		self.word_encoder = WordEncoder.WordEncoder(config, self.dataloader.tweet_field.vocab)
		self.word_pos_encoder = PositionEncoder.PositionEncoder(config, self.config.max_length)
		self.time_delay_encoder = PositionEncoder.PositionEncoder(config, self.config.size)

		# <----------- Check for GPU setting ----------->
		if self.config.gpu:
			# Wrap model and criterion for parallel execution across GPUs.
			self.hierarchical_transformer = DataParallelModel(hierarchical_transformer.cuda())
			self.criterion = DataParallelCriterion(nn.NLLLoss())

		else:
			self.hierarchical_transformer = hierarchical_transformer
			self.criterion = nn.NLLLoss()

		# Adam with the Transformer-style base learning rate d_model^-0.5;
		# the project Optimizer wrapper presumably applies warmup scheduling
		# on top of it — TODO confirm.
		self.adam_optimizer = optim.Adam(self.hierarchical_transformer.parameters(), np.power(self.config.d_model, - 0.5), betas = (self.config.beta_1, self.config.beta_2))
		self.optimizer = Optimizer.Optimizer(self.config, self.adam_optimizer)
Пример #9
0
def do_stuff_with_map(map):
    """Build a reach-avoid STL specification over *map*, optimize a control
    trajectory against it, then print and plot the result.

    Mirrors do_stuff_with_map in ROSstuff.py.
    """
    # Parameters for the STL specification generator.
    time_bound = 20
    goal = (3, 1)
    accuracy = 0.25
    time_steps = time_bound + 1

    # Parameters for the optimizer: initial state as a 4x1 column vector
    # (state layout assumed to be position/velocity pairs — TODO confirm).
    initial_state = np.asarray([0.5, 0, 0.5, 0])[:, np.newaxis]
    # Initial control guess: zeros in the first channel, constant 0.1 in the
    # second, one value per time step.  (The original pre-filled u_guess with
    # np.zeros and immediately overwrote it — that dead store is removed.)
    u_guess = np.asarray([[0] * time_steps,
                          [0.1] * time_steps])

    # Derivative-free optimization method passed to the optimizer.
    method = 'Powell'

    my_reachavoid = ReachAvoid(map, time_bound, goal, accuracy)
    ax = my_reachavoid.return_region()
    my_finished_specification = my_reachavoid.full_spec

    my_optimizer = Optimizer(initial_state, my_finished_specification,
                             time_bound, time_steps, u_guess, ax)
    optimal_trajectory = my_optimizer.optimize(method)
    print("robustness: %s" % (my_optimizer.rho(optimal_trajectory)))
    my_optimizer.plot_trajectory(optimal_trajectory)

    print(my_reachavoid.full_spec)
Пример #10
0
def long(generations:int):
	"""Run a CuckooSearch over the macrostate-weight parameter space for
	*generations* iterations and write the best frequencies and parameters
	to timestamped output files.
	"""
	data = "/netapp/home/tianjiao.zhang/data/microstates.dat";
	targetFreqs = "/netapp/home/tianjiao.zhang/data/ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";

	# Macrostate and residue name enums for this DHFR system.
	MACROSTATES = enum("E-DHF-NADPH", "E-NADPH", "E-THF", "E-THF-NADPX", "TS");
	RESIDUES = enum('A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y');

	# Parameter bounds searched by the algorithm (per-macrostate weights
	# range over [0, 1]).
	ensembleSizes = numpy.array([16, 24, 32, 64, 128]);
	backrubTemps = numpy.array([0.3, 0.6, 0.9, 1.2, 1.5, 1.8]);
	boltzmannTemps = numpy.array([-1, 5]);
	steepnessRange = numpy.array([1, 7]);
	minWeights = numpy.array([0, 0, 0, 0, 0]);
	maxWeights = numpy.array([1, 1, 1, 1, 1]);
	
	optimizer = Optimizer(MACROSTATES, True);
	optimizer.readTargetFrequencies(targetFreqs);	
	optimizer.readFormattedMicrostateData(data);
		
	# Positional args (True, 128, 1.25, 0.25) presumably configure population
	# size and search parameters — confirm against the CuckooSearch signature.
	search = CuckooSearch(optimizer.models, JensenShannonDistance(optimizer.targetFrequencies), True, 128, 1.25, 0.25);
	search.setMaxIterations(generations);
	search.setParamBounds(ensembleSizes, backrubTemps, boltzmannTemps, steepnessRange, minWeights, maxWeights);
	search.setAllSearchToTrue();
	search.suppressOutputs = True;
	optimizer.useAlgorithm(search);
	optimizer.optimize();
	now = datetime.now();
	optimizer.writeFrequenciesToFASTA(optimizer.getBestFrequencies(), "var ensembles " + now.strftime('%Y%m%d%H%M') + ".fasta");
	optimizer.writeBestParamsToText("var ensembles " + now.strftime('%Y%m%d%H%M'));
Пример #11
0
    def runMesh(self):
        """Sweep the mission parameter mesh: optimize and save every mission
        until the range limit (self.Rmax) is exceeded, timing the whole run.
        """
        t0 = time.time()
        # Iterate while the current range is within the configured maximum.
        while (self.Ract <= self.Rmax):
            print("----------------------------------")
            print("actT: " + str(self.Tact) + " - " + str(self.mis.sec))
            #print("Torig: " + str(self.Torigin))
            print("actV: " + str(self.Vact) + " - " + str(self.mis.tas))
            print("actR: " + str(self.Ract / 1000.) + " - " +
                  str(self.mis.calcDistance() / 1000.))

            self.mis.saveMission()

            verbose = False
            # Optimize the current mission; delete the optimizer right after
            # saving its flights so its resources are released promptly.
            opt = Optimizer(self.mis, verbose=verbose, savePlots=False)
            opt.saveFlights(verbose=False)
            del opt

            # Copy the run configuration next to the mission's results.
            ini = Ini(self.mis.NAME)
            ini.copyConfig(self.mis.getResultsPath() + "config.ini")
            del ini

            # Advance to the next mesh point.
            self.nextMission()
            #self.mis.plotMission()
        t1 = time.time()
        print('[Mesh] Runtime [s]: %f' % (t1 - t0))
Пример #12
0
    def execute(self):
        """Simulate per-partner daily profits, pick products to exclude for
        each day, and dump the per-day exclusions to log.json.
        """
        partnerDataReader = PartnerDataReader(self.partnerId)
        perPartnerSimulator = PerPartnerSimulator()

        self.currentDay = 0
        # Per-click cost is estimated from the last day of data.
        perClickCost = perPartnerSimulator.getPerClickCost(
            partnerDataReader.getDay(-1))
        print("PerClickCost: " + str(perClickCost))
        accumulatedProfits = []
        days = []
        accumulatedProfit = 0.00
        allProducts = []

        jsonLog = {}
        jsonLog['days'] = []

        for x in range(self.allDays):
            data = partnerDataReader.getDay(self.currentDay)
            profit = perPartnerSimulator.calculatePerDayProfitGainFactors(
                data, perClickCost)
            accumulatedProfit = accumulatedProfit + profit
            # NOTE(review): data[3] appears to be a column of Unix timestamps
            # (used with utcfromtimestamp below) — confirm against the reader.
            timeCol = data[3]
            print("Day " + str(x) + ": " + str(profit) + " Accumulated: " +
                  str(accumulatedProfit))
            accumulatedProfits.append(accumulatedProfit)
            days.append(self.currentDay)

            products = perPartnerSimulator.getProducts(data)

            # Accumulate the set of all products seen so far (kept as a list
            # to preserve first-seen order).
            for y in range(len(products)):
                if products[y] not in allProducts:
                    allProducts.append(products[y])
                    #allProducts.append(str(products[y]))
                    #print(datetime.utcfromtimestamp(timeCol[y]))

            # Choose products to exclude pseudo-randomly from everything seen.
            optimizer = Optimizer(allProducts)
            excluded = optimizer.getExcludedProductsPseudoradnomly()
            excluded.sort()

            print("Excluded: " + str(len(excluded)))
            for y in range(len(excluded)):
                print(excluded[y])

            # Exclusions are logged under the *next* day's date.
            dateToJson = datetime.utcfromtimestamp(
                timeCol[0]) + timedelta(days=1)
            jsonLog['days'].append({
                'day':
                str(dateToJson.year) + "-" + str(dateToJson.month) + "-" +
                str(dateToJson.day),
                'productsToExclude':
                excluded,
            })

            self.currentDay = self.currentDay + 1

        with open('log.json', 'w') as outfile:
            json.dump(jsonLog, outfile)

        print('JSON saved')
Пример #13
0
def testRandUniformInput():
	"""Sanity check: with every microstate energy forced to a uniform value,
	run one CuckooSearch iteration under several similarity measures and
	record the resulting match scores to a text file.
	"""
	MACROSTATES = enum("E-DHF-NADPH", "E-NADPH", "E-OPEN", "E-THF", "E-THF-NADPX", "TS");
	RESIDUES = enum('A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y');

	# only looking at MACROSTATE.TS
	# only optimizing backrub temperature and steepness
	ensembleSizes = numpy.array([50]);
	backrubTemps = numpy.array([0.3, 0.6, 0.9, 1.2, 1.5, 1.8]);
	boltzmannTemps = numpy.array([0, -1, 1, 5.0]);
	steepnessRange = numpy.array([0.5, 5]);
	minWeights = numpy.array([0, 0, 0, 0, 0, 0]);
	maxWeights = numpy.array([1, 1, 0, 1, 1, 1]);

	print("Initializing objects\n");

	targetFreqs = "C:\\Users\\Candy\\SkyDrive\\Documents\\rotation 2\\ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";
	targetFreqsAlt = "C:\\Users\\Candy_000\\SkyDrive\\Documents\\rotation 2\\ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";
	data = "C:\\Users\\Candy\\SkyDrive\\Documents\\rotation 2\\DHFR_MSD_M20loop\\DHFR_MSD_M20loop_repeat1.tsv";
	dataAlt = "C:\\Users\\Candy_000\\SkyDrive\\Documents\\rotation 2\\DHFR_MSD_M20loop\\DHFR_MSD_M20loop_repeat1.tsv";
	
	optimizer = Optimizer(MACROSTATES);

	# slightly different paths on my two computers
	# NOTE(review): bare except — ANY failure (not just a missing file)
	# silently falls through to the alternate paths.
	try:
		optimizer.readTargetFrequencies(targetFreqs);	
		optimizer.readData(data);
	except:
		optimizer.readTargetFrequencies(targetFreqsAlt);	
		optimizer.readData(dataAlt);

	# make energies uniform
	for model in optimizer.models:
		optimizer.models[model].macrostateResidueEnergies = numpy.ones_like(optimizer.models[model].macrostateResidueEnergies);

	# One iteration only: we just want the best similarity score, not a search.
	search = CuckooSearch(optimizer.models, JensenShannonDistance(optimizer.targetFrequencies), False, 1, 1, 0.25);
	search.setMaxIterations(1);
	search.setParamBounds(ensembleSizes, backrubTemps, boltzmannTemps, steepnessRange, minWeights, maxWeights);
	search.setSearchParameters(False, False, False, False, numpy.array([False, False, False, False, False, False]));
	optimizer.useAlgorithm(search);

	# Re-run the same optimization under each similarity measure and log the
	# 'match' value of the best parameter set.
	outfile = open("uniform energy similarities.txt", 'w');
	optimizer.optimize();
	outfile.write("JSD: {:.4f}\n".format(optimizer.getBestParameters()['match']));

	search.setSimilarityMeasure(CosineSimilarity(optimizer.targetFrequencies));
	optimizer.optimize();
	outfile.write("Cosine similarity: {:.4f}\n".format(optimizer.getBestParameters()['match']));

	search.setSimilarityMeasure(KLDivergence(optimizer.targetFrequencies));
	optimizer.optimize();
	outfile.write("K-L divergence: {:.4f}\n".format(optimizer.getBestParameters()['match']));

	search.setSimilarityMeasure(EntropyWeightsMixedSimilarity(CosineSimilarity(), JensenShannonDistance(), optimizer.targetFrequencies));
	optimizer.optimize();
	outfile.write("Weighted mixed similarity: {:.4f}\n".format(optimizer.getBestParameters()['match']));
	outfile.close();
	return None;
Пример #14
0
def run_all_acc_loss_possibilities(ps,
                                   ones_range,
                                   gran_th,
                                   mode=None,
                                   acc_loss_opts=ACC_LOSS_OPTS,
                                   patterns_idx=None):
    """Run the optimizer once for every accuracy-loss option.

    patterns_idx is accepted for interface compatibility but unused here.
    """
    for loss_target in acc_loss_opts:
        Optimizer(ps, ones_range, gran_th, loss_target).run_mode(mode)
Пример #15
0
 def __init__(self, inputs, targets, learning_rate, batch_size):
     """Build the edge-detection graph: forward pass, loss, train op, and a
     summary op restricted to the 'edge_detection' variable scope."""
     self.batch_size = batch_size
     with tf.variable_scope("edge_detection"):
         self.output = self.edge_detection(inputs)
         self.cost = self.calculate_loss(self.output, targets)
         self.optimizer = Optimizer(learning_rate)
         self.train_op = self.optimizer.apply_gradients(self.cost)
     # Merge only summaries created inside the 'edge_detection' scope.
     self.summary_op = tf.summary.merge(
         tf.get_collection(tf.GraphKeys.SUMMARIES, scope='edge_detection'))
Пример #16
0
def run_all_ones_possibilities(ps,
                               ones_possibilities,
                               gran_th,
                               acc_loss,
                               mode=None):
    """Run the optimizer for each candidate ones-count, printing the runtime
    estimate before each run."""
    for ones_count in ones_possibilities:
        # Each run covers the half-open range [ones_count, ones_count + 1).
        optim = Optimizer(ps, (ones_count, ones_count + 1), gran_th, acc_loss)
        optim.print_runtime_eval()
        optim.run_mode(mode)
Пример #17
0
 def __init__(self, inputs, targets, learning_rate, min_mean, max_mean):
     """Build the mean-detection graph: forward pass, rescaled output, loss,
     train op, and a summary op restricted to the 'mean_detection' scope.

     min_mean / max_mean bound the scaling applied by
     inverse_scale_mean_targets.
     """
     self.min_mean = min_mean
     self.max_mean = max_mean
     with tf.variable_scope("mean_detection"):
         logits = self.mean_detection(inputs)
         # Map raw logits back to the original target range for reporting.
         self.output = self.inverse_scale_mean_targets(logits)
         # Loss is computed on the raw (scaled) logits, not self.output.
         self.cost = self.calculate_loss(logits, targets)
         self.optimizer = Optimizer(learning_rate)
         self.train_op = self.optimizer.apply_gradients(self.cost)
     self.summary_op = tf.summary.merge(
         tf.get_collection(tf.GraphKeys.SUMMARIES, scope='mean_detection'))
Пример #18
0
def smalltestPrevOptimalVals():
	"""Re-evaluate a single, previously-found optimal parameter set (bounds
	pinned to one value each) and write the resulting frequencies/parameters
	to timestamped output files.
	"""
	MACROSTATES = enum("E-DHF-NADPH", "E-NADPH", "E-OPEN", "E-THF", "E-THF-NADPX", "TS");
	RESIDUES = enum('A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y');

	# only looking at MACROSTATE.TS
	# only optimizing backrub temperature and steepness
	# min == max for every parameter, so the "search" evaluates one point.
	ensembleSizes = numpy.array([50]);
	backrubTemps = numpy.array([1.8]);
	boltzmannTemps = numpy.array([0.0]);
	steepnessRange = numpy.array([3.0]);
	minWeights = numpy.array([0.80, 0.55, 0, 0.90, 0.35, 1.00]);
	maxWeights = numpy.array([0.80, 0.55, 0, 0.90, 0.35, 1.00]);

	print("Initializing objects\n");

	targetFreqs = "C:\\Users\\Candy\\SkyDrive\\Documents\\rotation 2\\ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";
	targetFreqsAlt = "C:\\Users\\Candy_000\\SkyDrive\\Documents\\rotation 2\\ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";
	data = "C:\\Users\\Candy\\SkyDrive\\Documents\\rotation 2\\DHFR_MSD_M20loop\\DHFR_MSD_M20loop_repeat6.tsv";
	dataAlt = "C:\\Users\\Candy_000\\SkyDrive\\Documents\\rotation 2\\DHFR_MSD_M20loop\\DHFR_MSD_M20loop_repeat5.tsv";
	
	optimizer = Optimizer(MACROSTATES);

	# slightly different paths on my two computers
	# NOTE(review): bare except — any failure falls through to the alt paths,
	# and the alt data file is repeat5 while the primary is repeat6.
	try:
		optimizer.readTargetFrequencies(targetFreqs);	
		optimizer.readData(data);
	except:
		optimizer.readTargetFrequencies(targetFreqsAlt);	
		optimizer.readData(dataAlt);

	print("Files read in");

	search = CuckooSearch(optimizer.models, JensenShannonDistance(optimizer.targetFrequencies), False, 1, 1, 0.25);
	search.setMaxIterations(1);
	search.setParamBounds(ensembleSizes, backrubTemps, boltzmannTemps, steepnessRange, minWeights, maxWeights);
	search.setSearchParameters(False, False, False, False, numpy.array([False, False, False, False, False, False]));
	optimizer.useAlgorithm(search);

	#print("Cos similiarity");
	#optimizer.optimize();	
	#optimizer.writeFrequenciesToFASTA(optimizer.getBestFrequencies(), "testOutCos.fasta");
	#print(optimizer.getBestParameters()['match']);

	print("\nJS Dist");
	#search.setSimilarityMeasure(JensenShannonDistance(optimizer.targetFrequencies));
	optimizer.optimize();
	now = datetime.now();
	optimizer.writeFrequenciesToFASTA(optimizer.getBestFrequencies(), "prev opt vals " + now.strftime('%Y%m%d%H%M') + ".fasta");
	optimizer.writeBestParamsToText("prev opt vals " + now.strftime('%Y%m%d%H%M'))
	print(optimizer.getBestParameters()['match']);
	
	return None;
Пример #19
0
    def __init__(self, X_train, Y_train, activation_function, optimizer):
        """Initialize a small two-layer network.

        X_train: input matrix; shape[1] sets the input width, shape[0] the
            number of samples.
        Y_train: target matrix.
        activation_function / optimizer: names (strings); the matching helper
            objects are also constructed and stored.
        """
        self.input = X_train
        # Hidden layer of 7 units: weights (features x 7), bias (samples x 7).
        self.weights1 = np.random.rand(X_train.shape[1], 7)
        self.bias1 = np.zeros((X_train.shape[0], 7))
        self.weights2 = np.random.rand(7, 1)
        # NOTE(review): bias2 is sized from Y_train.shape[1] rather than the
        # sample count — looks inconsistent with bias1; confirm intent.
        self.bias2 = np.zeros((Y_train.shape[1], 1))
        self.y = Y_train
        self.output = np.zeros(Y_train.shape)
        self.optimizer = optimizer  #string
        self.Optimizer = Optimizer(activation_function)  #object
        self.activation_function = activation_function  #string
        self.Activation = Activation_Function()  #object
Пример #20
0
def do_optimization(n,m,k,tau,lower_bounds, upper_bounds, r, rN, \
            max_normal, sorted_index, max_processes, multi_event, get_values):
    """
    Performs the optimization for the given parameters with max_processes
    number of processes
    Returns a list of the best C matrices and associated mu values
    and likelihoods
    """
    enum = Enumerator(n, m, k, tau, lower_bounds, upper_bounds, multi_event)
    opt = Optimizer(r, rN, m, n, tau, upper_bound=max_normal)
    MAX_QUEUE_SIZE = int(10E6)
    try:
        queue = Queue(MAX_QUEUE_SIZE)  #Task queue for the processes
    except OSError:
        # Some platforms cap queue sizes; fall back to the largest short.
        MAX_QUEUE_SIZE = 2**15 - 1
        queue = Queue(MAX_QUEUE_SIZE)

    returnQueue = Queue(
        MAX_QUEUE_SIZE)  #Shared queue for processes to return results

    processes = start_processes(max_processes, queue, opt, returnQueue, \
                sorted_index, get_values)

    # Start from the initial matrix directly (fix for the missing-first-matrix
    # problem with generate_next_C).
    C = enum._C_to_array()
    count = 0
    while C is not False:
        count += 1
        queue.put(C, True)
        C = enum.generate_next_C()
    if count == 0:
        # print() call form (was a Python 2 print statement, which is a
        # SyntaxError under Python 3).
        print("Error: No valid Copy Number Profiles exist for these intervals within the bounds specified. Exiting...")
        sys.exit(1)

    # Send STOP signal to all processes
    for i in range(max_processes - 1):
        queue.put(0)

    # Collect one result per worker.
    best = []
    for i in range(len(processes)):
        best.append(returnQueue.get())

    for p in processes:
        p.join()

    # Keep only the minimal-likelihood results.
    return find_mins(best)
Пример #21
0
def do_optimization_single(n,m,k,tau,lower_bounds, upper_bounds, r, rN, \
            max_normal, sorted_index, multi_event, get_values):
    """
    Performs the optimization for the given parameters with a single process
    Returns a list of the best C matrices and associated mu values
    and likelihoods
    """

    enum = Enumerator(n, m, k, tau, lower_bounds, upper_bounds, multi_event)
    opt = Optimizer(r, rN, m, n, tau, upper_bound=max_normal)
    min_likelihood = float("inf")
    best = []
    count = 0

    # Start from the initial matrix directly (fix for the missing-first-matrix
    # problem with generate_next_C).
    C = enum._C_to_array()
    if get_values: solns = []
    while C is not False:
        count += 1
        soln = opt.solve(C)
        if soln is not None:
            (mu, likelihood, vals) = soln

            if get_values: solns.append((C, mu, likelihood))
            # Near-ties (within isClose tolerance) are all kept; a strictly
            # better likelihood replaces the current best set.
            if isClose([likelihood], [min_likelihood]):
                C_new = reverse_sort_C(C, sorted_index)
                vals = reverse_sort_list(vals, sorted_index)
                best.append((C_new, mu, likelihood, vals))
            elif likelihood < min_likelihood:
                C_new = reverse_sort_C(C, sorted_index)
                vals = reverse_sort_list(vals, sorted_index)
                best = [(C_new, mu, likelihood, vals)]
                min_likelihood = likelihood

        C = enum.generate_next_C()

    if get_values:
        # NOTE(review): `pre` is not defined in this function — it must be a
        # module-level name; confirm it exists wherever this is used.
        with open(pre + "." + "likelihoods", 'w') as f:
            for C, mu, likelihood in solns:
                m, n = C.shape
                stringC = "".join((str(int(C[i][1])) for i in range(m)))
                f.write(stringC + "\t" + str(mu[0]) + "\t" + str(likelihood) +
                        "\n")

    if count == 0:
        # print() call form (was a Python 2 print statement, which is a
        # SyntaxError under Python 3).
        print("Error: No valid Copy Number Profiles exist for these intervals within the bounds specified. Exiting...")
        sys.exit(1)
    return best
Пример #22
0
def testChi2(iterations = 64):
	"""Run CuckooSearch with the chi-squared kernel for *iterations*
	iterations and write the best frequencies/parameters to timestamped files.
	"""
	print("Hello!\n");
	MACROSTATES = enum("E-DHF-NADPH", "E-NADPH", "E-OPEN", "E-THF", "E-THF-NADPX", "TS");
	RESIDUES = enum('A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y');

	# only looking at MACROSTATE.TS
	# only optimizing backrub temperature and steepness
	ensembleSizes = numpy.array([20, 50]);
	backrubTemps = numpy.array([0.3, 0.6, 0.9, 1.2, 1.5, 1.8]);
	boltzmannTemps = numpy.array([0, -1, 1, 5.0]);
	steepnessRange = numpy.array([0.5, 5]);
	minWeights = numpy.array([0, 0, 0, 0, 0, 0]);
	maxWeights = numpy.array([1, 1, 0, 1, 1, 1]);

	print("Initializing objects\n");

	data = "/netapp/home/tianjiao.zhang/data/DHFR_MSD_M20loop_repeat1.tsv";
	#data =  "C:\\Users\\Candy\\SkyDrive\\Documents\\rotation 2\\DHFR_MSD_M20loop\\DHFR_MSD_M20loop_repeat" + str(i + 1) + ".tsv";
	#targetFreqs = "C:\\Users\\Candy\\SkyDrive\\Documents\\rotation 2\\ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";
	targetFreqs = "/netapp/home/tianjiao.zhang/data/ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";

	optimizer = Optimizer(MACROSTATES);

	# BUG FIX: the previous try/except fallback referenced names that are not
	# defined in this function (targetFreqsAlt / dataAlt), so any read failure
	# raised NameError instead of the real error.  Read directly and let the
	# actual I/O error propagate.
	optimizer.readTargetFrequencies(targetFreqs);
	optimizer.readData(data);

	print("Files read in");

	search = CuckooSearch(optimizer.models, Chi2Kernel(optimizer.targetFrequencies), False, 64, 1, 0.25);
	search.setMaxIterations(iterations);
	search.setParamBounds(ensembleSizes, backrubTemps, boltzmannTemps, steepnessRange, minWeights, maxWeights);
	search.setSearchParameters(False, True, True, True, numpy.array([True, True, False, True, True, True]));
	optimizer.useAlgorithm(search);

	print("\nChi2 kernel");
	#search.setSimilarityMeasure(JensenShannonDistance(optimizer.targetFrequencies));
	optimizer.optimize();
	now = datetime.now();
	optimizer.writeFrequenciesToFASTA(optimizer.getBestFrequencies(), "Chi2 test " + now.strftime('%Y%m%d%H%M%S') + ".fasta");
	optimizer.writeBestParamsToText("Chi2 test " + now.strftime('%Y%m%d%H%M%S'));
	
	return None;
Пример #23
0
def get_baseline_rec(net_name, dataset_name, ps, init_acc):
    """Load the baseline record, computing it first if it is not on disk.

    Returns the loaded record, or None when the baseline could not be
    produced even after running the baseline measurement.
    """
    finder = RecordFinder(net_name, dataset_name, ps, ('*', '*'), '*', '*',
                          init_acc)
    baseline_fn = finder.find_rec_filename(None, RecordType.BASELINE_REC)
    if baseline_fn is None:
        # Not cached yet — run the baseline measurement and look again.
        Optimizer(ps, (None, None), None, None).base_line_result()
        baseline_fn = finder.find_rec_filename(None, RecordType.BASELINE_REC)
    if baseline_fn is None:
        print(
            f' !!! Was not able to get baseline result for initial accuracy of {init_acc} !!!'
        )
        print(f' !!! Adjust TEST_SET_SIZE in Config.py !!!')
        return baseline_fn
    return load_from_file(baseline_fn, '')
Пример #24
0
def DHFRcomparemeasures(similarity:int):
	"""Run CuckooSearch with one of six similarity measures, selected by the
	integer code *similarity* (0=JSD, 1=Cosine, 2=KLD, 3=Mixed, 4=Weighted
	JSD, anything else=Chi2 kernel), and write the best results to disk.
	"""
	MACROSTATES = enum("E-DHF-NADPH", "E-NADPH", "E-OPEN", "E-THF", "E-THF-NADPX", "TS");
	RESIDUES = enum('A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y');

	# Parameter bounds searched by the algorithm.
	ensembleSizes = numpy.array([20, 50]);
	backrubTemps = numpy.array([0.3, 0.6, 0.9, 1.2, 1.5, 1.8]);
	boltzmannTemps = numpy.array([0, -1, 1, 5.0]);
	steepnessRange = numpy.array([0.5, 5]);
	minWeights = numpy.array([0, 0, 0, 0, 0, 0]);
	maxWeights = numpy.array([1, 1, 0, 1, 1, 1]);

	data = "/netapp/home/tianjiao.zhang/data/DHFR_MSD_M20loop_repeat1.tsv";
	targetFreqs = "/netapp/home/tianjiao.zhang/data/ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";
	optimizer = Optimizer(MACROSTATES);
	optimizer.readTargetFrequencies(targetFreqs);
	optimizer.readData(data);

	# Select the similarity measure; `measure` tags the output filenames.
	measure = "";
	if similarity == 0:
		search = CuckooSearch(optimizer.models, JensenShannonDistance(optimizer.targetFrequencies), False, 64, 1, 0.25);
		measure = " JSD";
	elif similarity == 1:
		search = CuckooSearch(optimizer.models, CosineSimilarity(optimizer.targetFrequencies), False, 64, 1, 0.25);
		measure = " Cos";
	elif similarity == 2:
		search = CuckooSearch(optimizer.models, KLDivergence(optimizer.targetFrequencies), False, 64, 1, 0.25);
		measure = " KLD";
	elif similarity == 3:
		search = CuckooSearch(optimizer.models, EntropyWeightsMixedSimilarity(CosineSimilarity(), JensenShannonDistance(), optimizer.targetFrequencies), False, 64, 1, 0.25);
		measure = " Mix"
	elif similarity == 4:
		search = CuckooSearch(optimizer.models, EntropyWeightedSimilarity(JensenShannonDistance(), optimizer.targetFrequencies), False, 64, 1, 0.25);
		measure = "Weighted JSD";
	else:
		search = CuckooSearch(optimizer.models, Chi2Kernel(optimizer.targetFrequencies), False, 64, 1, 0.25);
		measure = "Chi2 kernel";
	search.setMaxIterations(2048);
	search.setParamBounds(ensembleSizes, backrubTemps, boltzmannTemps, steepnessRange, minWeights, maxWeights);
	search.setSearchParameters(True, True, True, True, numpy.array([True, True, False, True, True, True]));
	optimizer.useAlgorithm(search);
	optimizer.optimize();

	name = "DHFR compare measures " + measure + " " + datetime.now().strftime('%Y%m%d%H%M');
	optimizer.writeFrequenciesToFASTA(optimizer.getBestFrequencies(), name + ".fasta", 3);
	optimizer.writeBestParamsToText(name + ".txt");
Пример #25
0
def compile_model(model: object, config: object) -> object:
    """Compile *model* with the optimizer and loss dictated by *config*.

    The 'celeba' complexity setting is treated as a regression problem
    (mean squared error); every other setting is compiled for binary
    classification (binary cross-entropy). Accuracy is tracked either way.

    Returns the compiled model (same object, mutated in place).
    """
    print('Finished building model')
    print('Compiling model...')
    # Optimizer instance is resolved from the config's named choice.
    chosen_optimizer = Optimizer(config.optimizer).optimizer
    # Loss selection is the only thing that differs between complexities.
    loss_name = (
        'mean_squared_error'
        if config.complexity == 'celeba'
        else 'binary_crossentropy'
    )
    model.compile(loss=loss_name,
                  optimizer=chosen_optimizer,
                  metrics=['accuracy'])
    print('Finished compiling')
    model.summary()
    return model
Пример #26
0
def smallTestBoltz():
	"""Run a small cuckoo-search optimization over DHFR microstate data.

	Reads target residue frequencies and formatted microstate data
	(falling back to an alternate Windows user path if the primary path
	is missing), runs a CuckooSearch with Boltzmann-weighted parameters,
	and writes the best frequencies/parameters to timestamped files.

	Returns None.
	"""
	print("Hello!\n");
	MACROSTATES = enum("E-DHF-NADPH", "E-NADPH", "E-THF", "E-THF-NADPX", "TS");
	RESIDUES = enum('A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y');

	# Search-space bounds: one ensemble size, six backrub temperatures,
	# and [min, max] ranges for the Boltzmann temperature and steepness.
	ensembleSizes = numpy.array([128]);
	backrubTemps = numpy.array([0.3, 0.6, 0.9, 1.2, 1.5, 1.8]);
	boltzmannTemps = numpy.array([-1, 5]);
	steepnessRange = numpy.array([1, 7]);
	minWeights = numpy.array([0, 0, 0, 0, 0]);
	maxWeights = numpy.array([1, 1, 1, 1, 1]);

	print("Initializing objects\n");

	targetFreqs = "C:\\Users\\Candy\\SkyDrive\\Documents\\rotation 2\\ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";
	targetFreqsAlt = "C:\\Users\\Candy_000\\SkyDrive\\Documents\\rotation 2\\ecDHFR_openseq_bacterial_representative_final_align_trim.fasta";
	dataMicro = "C:\\Users\\Candy\\SkyDrive\\Documents\\rotation 2\\20160120_M20_enumeration_scores\\microstates.dat";
	dataMicroAlt = "C:\\Users\\Candy_000\\SkyDrive\\Documents\\rotation 2\\20160120_M20_enumeration_scores\\microstates.dat";

	optimizer = Optimizer(MACROSTATES, True);

	try:
		optimizer.readTargetFrequencies(targetFreqs);
		optimizer.readFormattedMicrostateData(dataMicro);
	except FileNotFoundError:
		# BUG FIX: the fallback previously used lower-cased method and
		# variable names (readtargetfrequencies / targetfreqsalt, etc.)
		# that do not exist, so it raised NameError instead of retrying
		# with the alternate paths.
		optimizer.readTargetFrequencies(targetFreqsAlt);
		optimizer.readFormattedMicrostateData(dataMicroAlt);

	search = CuckooSearch(optimizer.models, JensenShannonDistance(optimizer.targetFrequencies), True, 64, 1, 0.25);
	search.setMaxIterations(2048);
	search.setParamBounds(ensembleSizes, backrubTemps, boltzmannTemps, steepnessRange, minWeights, maxWeights);
	search.setSearchParameters(False, True, True, True, numpy.array([True, True, True, True, True, True]));
	optimizer.useAlgorithm(search);
	optimizer.optimize();
	now = datetime.now();
	optimizer.writeFrequenciesToFASTA(optimizer.getBestFrequencies(), "var ensembles " + now.strftime('%Y%m%d%H%M') + ".fasta");
	optimizer.writeBestParamsToText("var ensembles " + now.strftime('%Y%m%d%H%M'));

	#for i in range(8):
	#	thread = optimizerThread();
	#	thread.copyOptimizer(optimizer);
	#	thread.run();

	return None;
Пример #27
0
def optimize():
    """Flask endpoint: assign an incoming delivery to an agent.

    Dumps the stored deliveries from MongoDB (debug output), then builds a
    Delivery from the request JSON, computes its optimized path, and asks
    the Optimizer to squeeze it into an existing agent's route.

    Returns a JSON payload with the computed path and an assignment message.
    """
    # Load deliveries from the database.
    db = client['mydb']
    coll = db['deliveries']
    # NOTE(review): delete_many({}) empties the collection *before* find(),
    # so the loop below iterates nothing. Preserved as-is — confirm whether
    # the wipe or the debug dump is the intended behavior.
    coll.delete_many({})
    doc = coll.find()
    for x in doc:
        try:
            #print(x)
            print("========================")
            my_del = x['value']
            # SECURITY: pickle.loads on data read from the database executes
            # arbitrary code if the DB is ever writable by untrusted parties.
            my_del = pickle.loads(my_del)
            print("start:", my_del.start)
            print("end:", my_del.end)
            my_del.set_optimized_path()
            print(my_del.agent)
            print("paths", my_del.optimized_path)
            print("========================")
            '''
			if my_del.agent.name == "Mohsen":
				agent_1.add_delivery(my_del)
			elif my_del.agent.name == "Ali":
				agent_2.add_delivery(my_del)
			'''
        except Exception as e:
            # Best-effort debug dump: a malformed document should not abort
            # the request, so log the error and keep going.
            print(e)
            continue

    print(doc)
    # Build the new delivery from the request body and route it.
    data = request.get_json()
    d = Delivery(data['start'], data['end'], graph)
    d.set_optimized_path()
    print(data)
    op = Optimizer(agents, graph)
    result = op.squeeze_2(d)
    print("result ", result)
    # BUG FIX: identity-safe None comparison ("is not None", not "!= None").
    if result is not None:
        res = "delivery assigned to " + result.name
    else:
        res = " Could not assign this delivery to any agent"

    return jsonify({"path": d.optimized_path, "res": res})
Пример #28
0
def test_optimizer():
    """Exercise the Optimizer's converge/generate/score loop end to end.

    Searches a single integer 'length' parameter over [100, 200] in 32
    steps, scoring each candidate with test_strategy until the optimizer
    reports convergence, then writes the results spreadsheet.
    """
    optimizer = Optimizer()
    run_id = 0

    optimizer.outfile = 'optimizer_out.xls'

    ## parameter space to search over
    length_param = dict(name='length', min_val=100, max_val=200,
                        steps=32, converter=int)
    optimizer.add_parameter(length_param)

    # Keep scoring generated candidate sets until convergence.
    while not optimizer.converged():
        for candidate in optimizer.generate_set():
            log.debug('Testing: %s' % pprint.pformat(candidate))
            stats = test_strategy(candidate, run_id)
            optimizer.score(stats, run_id)
            run_id += 1
        # Persist intermediate state after each full set.
        optimizer.dump()

    optimizer.write()
Пример #29
0
def time_estimate(n,m,k,tau,lower_bounds, upper_bounds, r, rN, \
      max_normal, sorted_index, num_processes, multi_event, force):
    """
		Estimates the runtime with the specified bounds and parameters
	"""

    print "Estimating time..."
    if n is 3 and m > 30 and not force:
        print "\tWARNING: With n=3 and", m, "intervals, the runtime would likely be excessive. Try reducing the number of intervals below 25. Run with --FORCE to continue."
        exit(1)

    enum = Enumerator(n, m, k, tau, lower_bounds, upper_bounds, multi_event)
    opt = Optimizer(r, rN, m, n, tau, upper_bound=max_normal)

    if n == 2:
        TEST_NUM = 100
        count = count_number_matrices_2(m, upper_bounds, lower_bounds)
    else:
        TEST_NUM = 20
        count = count_number_matrices_3(m, upper_bounds, lower_bounds, enum)
    C = enum.generate_next_C()
    if C is False:
        print "ERROR: No valid Copy Number Profiles exist for these intervals within the bounds specified. Exiting..."
        exit(1)
    start = time.clock()
    for i in range(TEST_NUM):
        try:
            soln = opt.solve(C)
            C = enum.generate_next_C()
        except:
            break

    end = time.clock()
    avgVal = float(end - start) / TEST_NUM
    seconds = count * (float(end - start) / TEST_NUM) / num_processes
    print "\tEstimated Total Time:",
    if seconds < 60: print int(seconds + .5), "second(s)"
    elif seconds < 3600: print int((seconds / 60) + .5), "minute(s)"
    else:
Пример #30
0
def do_stuff_with_map(map):
    """Build an STL reach-avoid specification over *map* and optimize a
    control trajectory against it, printing the robustness and plotting
    the result.

    NOTE(review): the parameter name ``map`` shadows the builtin — renaming
    would change the keyword-call interface, so it is left as-is.
    """
    # setting the parameters for the STL specification generator
    time_bound = 20
    goal = (11.1, 8.8)
    accuracy = 0.45
    time_steps = time_bound + 1

    # setting the parameters for the optimizer
    # Column vector of 4 state values; presumably [x, vx, y, vy] — TODO confirm
    # against ReachAvoid/Optimizer conventions.
    initial_state = np.asarray([7.9, 0, 11, 0])[:, np.newaxis]
    # Initial control guess: 2 inputs per time step, all zeros.
    u_guess = np.zeros((2, time_steps))

    # optimization method
    method = 'Nelder-Mead'

    my_reachavoid = ReachAvoid(map, time_bound, goal, accuracy)
    ax = my_reachavoid.return_region()
    my_finished_specification = my_reachavoid.full_spec

    my_optimizer = Optimizer(initial_state, my_finished_specification,
                             time_bound, time_steps, u_guess, ax)
    optimal_trajectory = my_optimizer.optimize(method)
    # rho is the STL robustness measure of the optimized trajectory.
    print("robustness: %s" % (my_optimizer.rho(optimal_trajectory)))
    my_optimizer.plot_trajectory(optimal_trajectory)