Example #1
def Main(algorithm, problem, pop_size, crossover_probability,
         mutation_probability, n_partitions, n_gen, seed):

    # Instantiate the problem
    problem = Problems.get(problem)

    reference_directions = get_reference_directions("das-dennis",
                                                    problem.n_obj,
                                                    n_partitions=n_partitions)

    # Instantiate the algorithm
    if algorithm == Algorithms.NSGAII:
        algorithm = NSGA_II.Get_Algorithm_Instance(
            pop_size, crossover_probability, mutation_probability)
    elif algorithm == Algorithms.NSGAIII:
        algorithm = NSGA_III.Get_Algorithm_Instance(
            reference_directions, pop_size, crossover_probability,
            mutation_probability)
    else:
        algorithm = None

    # Instantiate the optimizer
    optimizer = Optimizer(problem, algorithm)

    optimization_result = optimizer.Minimize(n_gen, seed)
    objective_spaces_values = optimization_result.F

    # DTLZ1 builds its Pareto front from the reference directions
    pareto_front = (problem.pareto_front(reference_directions)
                    if type(problem).__name__ == "DTLZ1"
                    else problem.pareto_front())

    # Instantiate the performance indicators (Inverted Generational Distance (IGD) / Inverted Generational Distance Plus (IGD+))
    IGD = get_performance_indicator("igd", pareto_front)
    #IGD_plus = get_performance_indicator("igd+", pareto_front)

    # Print the metrics obtained for the solution set produced by the multimodal/multi-objective optimization
    print("IGD:", IGD.calc(objective_spaces_values))
Example #2
    def __init__(self, inputs, input_length, targets, target_length,
                 max_target_length, num_layers, rnn_size, sample_prob,
                 keep_prob, learning_rate, num_labels, embedding_size,
                 batch_size, length_cost_prop, GO, EOS):
        self.GO = GO
        self.EOS = EOS
        self.batch_size = batch_size
        self.num_labels = num_labels

        with tf.name_scope("decoder") as scope:
            self.output_embeddings = self.generate_embeddings(embedding_size)
            self.length_predictions = self.length_detection(
                inputs, target_length)
            self.logits, self.predictions = self.decoding_layer(
                inputs, input_length, targets, target_length,
                max_target_length, num_layers, rnn_size, sample_prob,
                keep_prob)
            self.cost = self.calculate_loss(self.logits, targets,
                                            self.length_predictions,
                                            target_length, max_target_length,
                                            length_cost_prop)
            self.optimizer = Optimizer(learning_rate)
            self.train_op = self.optimizer.apply_gradients(self.cost)
        self.summary_op = tf.summary.merge(
            tf.get_collection(tf.GraphKeys.SUMMARIES, scope='decoder'))
Example #3
    def __init__(self, model, activationfn=None):
        if isinstance(model, Model):
            self.Model = model
            self.Output_Options = []
            self.Input_Options = []
            self.Callback = None
            self.LastLayerId = None
            self.FirstLayerId = None
            self.LastNodes = []
            self.ParallelExecute = False
            self.previous_accuracy = 0.0
            self.current_accuracy = 0.0
            self.counter_ep = 1
            self.optimum_pass = 10
            # only call the activation-function factory when one was provided
            self.activationfn = activationfn() if activationfn is not None else None
            self.setActivationFunction(activationfn)
            # save a reference copy of all nodes of the model
            self.SaveTensors = SaveTensors(
                allTNodes=self.Model.get_AllTNodes())
            Optimizer.__init__(self,
                               model=model,
                               SavedTensors=self.SaveTensors,
                               optimizer='gradientdecent',
                               activationfn=self.activationfn)

        else:
            raise RuntimeError("passed model is not of type Model")
Example #4
    def runMesh(self):
        t0 = time.time()
        while (self.Ract <= self.Rmax):
            print("----------------------------------")
            print("actT: " + str(self.Tact) + " - " + str(self.mis.sec))
            #print("Torig: " + str(self.Torigin))
            print("actV: " + str(self.Vact) + " - " + str(self.mis.tas))
            print("actR: " + str(self.Ract / 1000.) + " - " +
                  str(self.mis.calcDistance() / 1000.))

            self.mis.saveMission()

            verbose = False
            opt = Optimizer(self.mis, verbose=verbose, savePlots=False)
            opt.saveFlights(verbose=False)
            del opt

            ini = Ini(self.mis.NAME)
            ini.copyConfig(self.mis.getResultsPath() + "config.ini")
            del ini

            self.nextMission()
            #self.mis.plotMission()
        t1 = time.time()
        print('[Mesh] Runtime [s]: %f' % (t1 - t0))
Example #5
    def execute(self):
        partnerDataReader = PartnerDataReader(self.partnerId)
        perPartnerSimulator = PerPartnerSimulator()

        self.currentDay = 0
        perClickCost = perPartnerSimulator.getPerClickCost(
            partnerDataReader.getDay(-1))
        print("PerClickCost: " + str(perClickCost))
        accumulatedProfits = []
        days = []
        accumulatedProfit = 0.00
        allProducts = []

        jsonLog = {}
        jsonLog['days'] = []

        for x in range(self.allDays):
            data = partnerDataReader.getDay(self.currentDay)
            profit = perPartnerSimulator.calculatePerDayProfitGainFactors(
                data, perClickCost)
            accumulatedProfit = accumulatedProfit + profit
            timeCol = data[3]
            print("Day " + str(x) + ": " + str(profit) + " Accumulated: " +
                  str(accumulatedProfit))
            accumulatedProfits.append(accumulatedProfit)
            days.append(self.currentDay)

            products = perPartnerSimulator.getProducts(data)

            for y in range(len(products)):
                if products[y] not in allProducts:
                    allProducts.append(products[y])
                    #allProducts.append(str(products[y]))
                    #print(datetime.utcfromtimestamp(timeCol[y]))

            optimizer = Optimizer(allProducts)
            excluded = optimizer.getExcludedProductsPseudoradnomly()
            excluded.sort()

            print("Excluded: " + str(len(excluded)))
            for y in range(len(excluded)):
                print(excluded[y])

            dateToJson = datetime.utcfromtimestamp(
                timeCol[0]) + timedelta(days=1)
            jsonLog['days'].append({
                'day': "{}-{}-{}".format(dateToJson.year, dateToJson.month,
                                         dateToJson.day),
                'productsToExclude': excluded,
            })

            self.currentDay = self.currentDay + 1

        with open('log.json', 'w') as outfile:
            json.dump(jsonLog, outfile)

        print('JSON saved')
Example #6
 def __init__(self, inputs, targets, learning_rate, batch_size):
     self.batch_size = batch_size
     with tf.variable_scope("edge_detection"):
         self.output = self.edge_detection(inputs)
         self.cost = self.calculate_loss(self.output, targets)
         self.optimizer = Optimizer(learning_rate)
         self.train_op = self.optimizer.apply_gradients(self.cost)
     self.summary_op = tf.summary.merge(
         tf.get_collection(tf.GraphKeys.SUMMARIES, scope='edge_detection'))
Example #7
def run_all_acc_loss_possibilities(ps,
                                   ones_range,
                                   gran_th,
                                   mode=None,
                                   acc_loss_opts=ACC_LOSS_OPTS,
                                   patterns_idx=None):
    for acc_loss in acc_loss_opts:
        optim = Optimizer(ps, ones_range, gran_th, acc_loss)
        optim.run_mode(mode)
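
As a usage sketch only (the concrete ps, ones_range, and gran_th values depend on this project's Optimizer and are purely illustrative):

    run_all_acc_loss_possibilities(ps=32, ones_range=(1, 3), gran_th=10,
                                   acc_loss_opts=[1.0, 2.0, 3.0])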
Example #8
 def __init__(self, inputs, targets, learning_rate, min_mean, max_mean):
     self.min_mean = min_mean
     self.max_mean = max_mean
     with tf.variable_scope("mean_detection"):
         logits = self.mean_detection(inputs)
         self.output = self.inverse_scale_mean_targets(logits)
         self.cost = self.calculate_loss(logits, targets)
         self.optimizer = Optimizer(learning_rate)
         self.train_op = self.optimizer.apply_gradients(self.cost)
     self.summary_op = tf.summary.merge(
         tf.get_collection(tf.GraphKeys.SUMMARIES, scope='mean_detection'))
Example #9
 def run(self):
     '''
     Invokes the config parser, generates the SQL statements, and runs the optimizer.
     '''
     parser = ConfigParser(self.filename)
     config = parser.configContent
     translator = Translator(config)
     translator.translate()
     self.blocks = translator.getBlocks()
     op = Optimizer(translator.getConvertedOptions(), self.blocks,
                    translator.getSenders())
     op.run()
Example #10
    def __init__(self, X_train, Y_train, activation_function, optimizer):

        self.input = X_train
        self.weights1 = np.random.rand(X_train.shape[1], 7)
        self.bias1 = np.zeros((X_train.shape[0], 7))
        self.weights2 = np.random.rand(7, 1)
        self.bias2 = np.zeros((Y_train.shape[1], 1))
        self.y = Y_train
        self.output = np.zeros(Y_train.shape)
        self.optimizer = optimizer  # optimizer name (string)
        self.Optimizer = Optimizer(activation_function)  # Optimizer instance
        self.activation_function = activation_function  # activation name (string)
        self.Activation = Activation_Function()  # activation object
Example #11
def do_optimization_single(n,m,k,tau,lower_bounds, upper_bounds, r, rN, \
            max_normal, sorted_index, multi_event, get_values):
    """
    Performs the optimization for the given parameters with a single process
    Returns a list of the best C matrices and associated mu values
    and likelihoods
    """

    enum = Enumerator(n, m, k, tau, lower_bounds, upper_bounds, multi_event)
    opt = Optimizer(r, rN, m, n, tau, upper_bound=max_normal)
    min_likelihood = float("inf")
    best = []
    count = 0

    #fix missing first matrix problem
    C = enum._C_to_array()
    #C = enum.generate_next_C()
    if get_values: solns = []
    while C is not False:
        count += 1
        soln = opt.solve(C)
        if soln is not None:
            (mu, likelihood, vals) = soln

            if get_values: solns.append((C, mu, likelihood))
            if isClose([likelihood], [min_likelihood]):
                C_new = reverse_sort_C(C, sorted_index)
                vals = reverse_sort_list(vals, sorted_index)
                best.append((C_new, mu, likelihood, vals))
            elif likelihood < min_likelihood:
                C_new = reverse_sort_C(C, sorted_index)
                vals = reverse_sort_list(vals, sorted_index)
                best = [(C_new, mu, likelihood, vals)]
                min_likelihood = likelihood

        C = enum.generate_next_C()

    if get_values:
        # `pre` is expected to be a module-level output-filename prefix
        with open(pre + "." + "likelihoods", 'w') as f:
            for C, mu, likelihood in solns:
                m, n = C.shape
                stringC = "".join((str(int(C[i][1])) for i in range(m)))
                f.write(stringC + "\t" + str(mu[0]) + "\t" + str(likelihood) +
                        "\n")

    if count == 0:
        print("Error: No valid Copy Number Profiles exist for these "
              "intervals within the bounds specified. Exiting...")
        sys.exit(1)
    return best
Example #13
	def __init__(self, dataloader, hierarchical_transformer, config, i):

		super(Trainer, self).__init__()

		self.iter = i
		self.config = config
		self.cpu = torch.device("cpu")
		self.multi_gpu = len(self.config.gpu_idx) > 1

		self.dataloader = dataloader
		self.word_encoder = WordEncoder.WordEncoder(config, self.dataloader.tweet_field.vocab)
		self.word_pos_encoder = PositionEncoder.PositionEncoder(config, self.config.max_length)
		self.time_delay_encoder = PositionEncoder.PositionEncoder(config, self.config.size)

		# <----------- Check for GPU setting ----------->
		if self.config.gpu:

			self.hierarchical_transformer = DataParallelModel(hierarchical_transformer.cuda())
			self.criterion = DataParallelCriterion(nn.NLLLoss())

		else:
			self.hierarchical_transformer = hierarchical_transformer
			self.criterion = nn.NLLLoss()

		self.adam_optimizer = optim.Adam(
			self.hierarchical_transformer.parameters(),
			np.power(self.config.d_model, -0.5),
			betas=(self.config.beta_1, self.config.beta_2))
		self.optimizer = Optimizer.Optimizer(self.config, self.adam_optimizer)
Example #14
def get_baseline_rec(net_name, dataset_name, ps, init_acc):
    rec_finder = RecordFinder(net_name, dataset_name, ps, ('*', '*'), '*', '*',
                              init_acc)
    bs_line_fn = rec_finder.find_rec_filename(None, RecordType.BASELINE_REC)
    if bs_line_fn is None:
        optim = Optimizer(ps, (None, None), None, None)
        optim.base_line_result()
        bs_line_fn = rec_finder.find_rec_filename(None,
                                                  RecordType.BASELINE_REC)
    if bs_line_fn is None:
        print(f' !!! Was not able to get baseline result for initial accuracy of {init_acc} !!!')
        print(' !!! Adjust TEST_SET_SIZE in Config.py !!!')
        return bs_line_fn
    return load_from_file(bs_line_fn, '')
Example #15
 def __init__(self, params):
     self.device_id = params["device_id"]
     self.epochs = params["epochs"]
     # data
     self.train_data = Data(params["train_data"])
     #self.test_data = Data(params["test_data"])
     #self.validation_data = Data(params["validation_data"])
     #self.progress_train_data = Data(params["progress_train_data"])
     self.progress_train_data = None
     self.progress_test_data = Data(params["progress_test_data"])
     # model
     self.model = Model.get(params["model"])
     print(self.model)
     if self.device_id != -1:
         self.model = self.model.cuda(self.device_id)
     # optimizer
     self.optimizer = Optimizer.get(self.model, params["optimizer"])
     # loss
     self.loss_func = Loss.get(params["loss"])
     #if self.device_id != -1:
     #  self.loss_func = self.loss_func.cuda(self.device_id)
     # progress evaluator
     self.progress_evaluator = ProgressEvaluator.get(
         params["progress_evaluator"], self.progress_train_data,
         self.progress_test_data, self.device_id)
Example #16
	def __init__(self, chromosome, resEval, penEval, fitnessEval, popSize, elitism):

		Optimizer.__init__(self)

		self.Chromosome = chromosome
		self.ResEval = resEval
		self.PenEval = penEval
		self.FitnessEval = fitnessEval
		self.PopulationSize = popSize
		self.Elitism = elitism
		self.SelectionOp = TournamentSelectionOperator(2, 2)
		self.RecombinationOp = DrunkRecombinationOperator()
		self.MutationOp = None
		self.Population = None

		self.PenEval.ConstraintPenalties.append(Penalty("LOWER_BOUND", "__SUCCESS__", 1.0, 1.0, 0.5))
Example #17
class MeanDetectModel:
    def __init__(self, inputs, targets, learning_rate, min_mean, max_mean):
        self.min_mean = min_mean
        self.max_mean = max_mean
        with tf.variable_scope("mean_detection"):
            logits = self.mean_detection(inputs)
            self.output = self.inverse_scale_mean_targets(logits)
            self.cost = self.calculate_loss(logits, targets)
            self.optimizer = Optimizer(learning_rate)
            self.train_op = self.optimizer.apply_gradients(self.cost)
        self.summary_op = tf.summary.merge(
            tf.get_collection(tf.GraphKeys.SUMMARIES, scope='mean_detection'))

    def mean_detection(self, enc_output):
        with tf.name_scope("output"):
            dense_cell = tf.layers.dense(inputs=enc_output,
                                         units=1)  # linear activation
            means = tf.reduce_mean(dense_cell, axis=2)
        return means

    def scale_mean_targets(self, targets):
        means_range = self.max_mean - self.min_mean
        return (targets - self.min_mean) * 2 / means_range - 1

    def inverse_scale_mean_targets(self, targets):
        means_range = tf.cast(self.max_mean - self.min_mean, tf.float32)
        return (targets + 1.) * means_range / 2. + tf.cast(
            self.min_mean, tf.float32)

    def calculate_loss(self, predictions, targets):
        with tf.name_scope("loss"):
            scaled_targets = self.scale_mean_targets(targets)
            cost = tf.losses.mean_squared_error(predictions, scaled_targets)
        tf.summary.scalar("loss", cost)
        return cost
Example #18
    def start(self):

        self.input_controller.fault_tolerance_request()
        self.input_controller.initial_groups_request()
        self.input_controller.blocked_vector_request()

        tested_faults = len(max(self.input_controller.initial_groups, key=len))

        processors = Processors(self.input_controller.initial_groups)
        generator = FormulasGenerator(processors,
                                      self.input_controller.fault_tolerance)
        selector = Selector(self.input_controller.blocked_vectors)
        tester = PrTester(processors)
        blocker = Blocker(self.input_controller.blocked_vectors, tester,
                          selector, processors)
        '''
            For original formulas
        '''
        generator.generate(self.input_controller.initial_groups,
                           self.original_formulas_file)
        optimizer_original = Optimizer(self.input_controller.fault_tolerance,
                                       processors,
                                       self.optimized_original_file)
        optimizer_original.optimize(generator.origin_formulas_arr)
        blocking_original_formulas = blocker.block_original(
            selector.select_group(optimizer_original.result_formulas))
        tester.test(self.result_of_testing_original_file,
                    blocking_original_formulas, tested_faults,
                    self.input_controller.fault_tolerance)
        '''
            For researched formulas
        '''
        self.input_controller.researched_way_request()
        if self.input_controller.researched_way:

            generator.generate(
                processors.form_groups_according_to_blocked_vector(
                    self.input_controller.blocked_vectors),
                self.researched_formulas_file)
            optimizer_researched = Optimizer(
                self.input_controller.fault_tolerance, processors,
                self.optimized_researched_file)
            optimizer_researched.optimize(generator.origin_formulas_arr)
            #selector.select_group(optimizer_researched.result_formulas)
            blocking_researched_formulas = blocker.block_researched(
                selector.select_group(optimizer_researched.result_formulas))
            tester.test(self.result_of_testing_researched_file,
                        blocking_researched_formulas, tested_faults,
                        self.input_controller.fault_tolerance)

        self.input_controller.repeat_request()

        return self.input_controller.repeat
Example #19
    def __init__(self, **kargs):
        Optimizer.__init__(self, **kargs)
        self.epochs = 0
        self.npop = 10
        if "population" in kargs:
            self.npop = kargs["population"]
        self.mutationrate = 0.2
        if "mutationrate" in kargs:
            self.mutationrate = kargs["mutationrate"]
        self.threads = 4
        if "threads" in kargs:
            self.threads = kargs["threads"]

        self.population = []
        self.MAX_INT = 2**32 if not self.maximize else -2**32
        for _ in range(0, self.npop):
            self.population.append([self.getInstance(), self.MAX_INT])

        self.lastscore = self.MAX_INT
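
Since all configuration flows through **kargs with defaults, a hypothetical construction could look like this (GeneticOptimizer is an illustrative name; the maximize flag is assumed to be consumed by the Optimizer base class, which this snippet reads back as self.maximize):

    ga = GeneticOptimizer(maximize=False, population=20,
                          mutationrate=0.1, threads=8)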
Example #20
    def test_grid_search_optimizer_as_expected(self):
        # Initialize market data loading values
        tickers = ['SPY']
        ticker_types = ['']
        data_sources = ['CSV']
        start_date = pd.to_datetime('2016-01-01')
        end_date = pd.to_datetime('2016-5-31')
        history_window = 20
        csv_data_uri = "support_files"

        # Load market data
        data = market_data.load_market_data(tickers, ticker_types,
                                            data_sources, start_date, end_date,
                                            history_window, csv_data_uri)

        # Initialize grid search optimizer values
        algorithm_uri = "support_files/MovingAverageDivergenceAlgorithm.py"
        num_processors = 4
        commission = 0.0
        ticker_spreads = [0.0001]
        optimizer_name = "GridSearchOptimizer"
        optimization_metric = "sharpe_ratio"
        optimization_metric_ascending = True
        optimization_parameters = {
            "ma_long_window": [10, 20, 2],
            "ma_short_window": [2, 5, 2],
            "open_long": [-0.25, -0.25, 1],
            "close_long": [0.4, 0.4, 1]
        }
        time_resolution = "daily"

        # Create trading algorithm
        trading_algorithm = TradingAlgorithm.create_trading_algorithm(
            algorithm_uri, tickers, history_window, None)

        # Setup and run the optimizer
        optimizer = of.create_optimizer(num_processors, optimizer_name,
                                        trading_algorithm, commission,
                                        ticker_spreads, optimization_metric,
                                        optimization_metric_ascending,
                                        optimization_parameters,
                                        time_resolution)
        results = optimizer.run(data, start_date, end_date)

        # Manually compute optimal parameters
        opt_params = Optimizer.get_optimal_parameters(
            results.backtest_results, optimization_metric,
            results.parameter_sets, optimization_metric_ascending,
            time_resolution)

        # Check results
        self.assertEqual(opt_params, results.optimal_parameters)
        self.assertTrue(
            len(results.backtest_results) == len(results.parameter_sets))
        self.assertEqual(4, len(results.backtest_results))
Example #21
def optimize():
    # load deliveries from the db
    db = client['mydb']
    coll = db['deliveries']
    coll.delete_many({})
    doc = coll.find()
    for x in doc:
        try:
            #print(x)
            print("========================")
            my_del = x['value']
            my_del = pickle.loads(my_del)
            print("start:", my_del.start)
            print("end:", my_del.end)
            my_del.set_optimized_path()
            print(my_del.agent)
            print("paths", my_del.optimized_path)
            print("========================")
            '''
			if my_del.agent.name == "Mohsen":
				agent_1.add_delivery(my_del)
			elif my_del.agent.name == "Ali":
				agent_2.add_delivery(my_del)
			'''
        except Exception as e:
            print(e)
            continue

    print(doc)
    data = request.get_json()
    d = Delivery(data['start'], data['end'], graph)
    d.set_optimized_path()
    print(data)
    op = Optimizer(agents, graph)
    result = op.squeeze_2(d)
    print("result ", result)
    if result is not None:
        res = "delivery assigned to " + result.name
    else:
        res = " Could not assign this delivery to any agent"

    return jsonify({"path": d.optimized_path, "res": res})
Example #22
def optimize(model,
             data,
             itermax=50,
             k=1,
             act_fn=round):  # round rounds .5 to zero
    weights = [model.W]
    accuracies = [model.test_acc(data, k=k, act_fn=act_fn)]
    for _ in range(itermax):
        model.W = Optimizer.gradient_descent(model, data)
        weights.append(model.W)
        accuracies.append(model.test_acc(data, k=k, act_fn=act_fn))
    return weights, accuracies
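
A hypothetical driver for this helper; model and data stand for whatever this project's Optimizer.gradient_descent consumes, so the names are illustrative only:

    weights, accuracies = optimize(model, data, itermax=100, k=1)
    print("best accuracy:", max(accuracies))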
Example #23
def do_stuff_with_map(map):

    # the following code is exactly the same as in do_stuff_with_map in ROSstuff.py

    # setting the parameters for the STL specification generator
    time_bound = 20
    goal = (3, 1)
    accuracy = 0.25
    time_steps = time_bound + 1

    # setting the parameters for the optimizer
    initial_state = np.asarray([0.5, 0, 0.5, 0])[:, np.newaxis]
    u_guess = np.zeros((2, time_steps)).flatten()
    u_guess = np.asarray(
        [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [
             0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
             0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1
         ]])

    # optimization method
    method = 'Powell'

    my_reachavoid = ReachAvoid(map, time_bound, goal, accuracy)
    ax = my_reachavoid.return_region()
    my_finished_specification = my_reachavoid.full_spec

    my_optimizer = Optimizer(initial_state, my_finished_specification,
                             time_bound, time_steps, u_guess, ax)
    optimal_trajectory = my_optimizer.optimize(method)
    print("robustness: %s" % (my_optimizer.rho(optimal_trajectory)))
    my_optimizer.plot_trajectory(optimal_trajectory)

    print(my_reachavoid.full_spec)
Example #24
def time_estimate(n, m, k, tau, lower_bounds, upper_bounds, r, rN,
                  max_normal, sorted_index, num_processes, multi_event, force):
    """
    Estimates the runtime with the specified bounds and parameters
    """

    print("Estimating time...")
    if n == 3 and m > 30 and not force:
        print("\tWARNING: With n=3 and", m, "intervals, the runtime would "
              "likely be excessive. Try reducing the number of intervals "
              "below 25. Run with --FORCE to continue.")
        exit(1)

    enum = Enumerator(n, m, k, tau, lower_bounds, upper_bounds, multi_event)
    opt = Optimizer(r, rN, m, n, tau, upper_bound=max_normal)

    if n == 2:
        TEST_NUM = 100
        count = count_number_matrices_2(m, upper_bounds, lower_bounds)
    else:
        TEST_NUM = 20
        count = count_number_matrices_3(m, upper_bounds, lower_bounds, enum)
    C = enum.generate_next_C()
    if C is False:
        print("ERROR: No valid Copy Number Profiles exist for these "
              "intervals within the bounds specified. Exiting...")
        sys.exit(1)
    start = time.perf_counter()
    for i in range(TEST_NUM):
        try:
            soln = opt.solve(C)
            C = enum.generate_next_C()
        except Exception:
            break

    end = time.perf_counter()
    avgVal = float(end - start) / TEST_NUM
    seconds = count * avgVal / num_processes
    print("\tEstimated Total Time:", end=" ")
    if seconds < 60:
        print(int(seconds + .5), "second(s)")
    elif seconds < 3600:
        print(int((seconds / 60) + .5), "minute(s)")
    else:
        print(int((seconds / 3600) + .5), "hour(s)")
Example #26
def run_all_ones_possibilities(ps,
                               ones_possibilities,
                               gran_th,
                               acc_loss,
                               mode=None):
    for ones in ones_possibilities:
        optim = Optimizer(ps, (ones, ones + 1), gran_th, acc_loss)
        optim.print_runtime_eval()
        optim.run_mode(mode)
Example #27
def do_optimization(n,m,k,tau,lower_bounds, upper_bounds, r, rN, \
            max_normal, sorted_index, max_processes, multi_event, get_values):
    """
    Performs the optimization for the given parameters with max_proccesses
    number of processes
    Returns a list of the best C matrices and associated mu values
    and likelihoods
    """
    enum = Enumerator(n, m, k, tau, lower_bounds, upper_bounds, multi_event)
    opt = Optimizer(r, rN, m, n, tau, upper_bound=max_normal)
    MAX_QUEUE_SIZE = int(10E6)
    try:
        queue = Queue(MAX_QUEUE_SIZE)  #Task queue for the processes
    except OSError:
        MAX_QUEUE_SIZE = 2**15 - 1
        queue = Queue(MAX_QUEUE_SIZE)

    returnQueue = Queue(
        MAX_QUEUE_SIZE)  #Shared queue for processes to return results

    processes = start_processes(max_processes, queue, opt, returnQueue, \
                sorted_index, get_values)

    # fix problem with missing first matrix
    #C = enum.generate_next_C()
    C = enum._C_to_array()
    count = 0
    while C is not False:
        count += 1
        queue.put(C, True)
        C = enum.generate_next_C()
    if count == 0:
        print("Error: No valid Copy Number Profiles exist for these "
              "intervals within the bounds specified. Exiting...")
        sys.exit(1)

    # Send STOP signal to all processes
    for i in range(max_processes - 1):
        queue.put(0)

    best = []
    for i in range(len(processes)):
        item = returnQueue.get()
        best.append(item)

    for p in processes:
        p.join()

    best = find_mins(best)
    return best
Example #28
    def __init__(self, model, vocab):
        self.model = model
        self.report = True

        self.train_data = get_examples(train_data, vocab)
        self.batch_num = int(
            np.ceil(len(self.train_data) / float(train_batch_size)))
        self.dev_data = get_examples(dev_data, vocab)
        self.test_data = get_examples(test_data, vocab)

        # criterion
        self.criterion = nn.CrossEntropyLoss()

        # label name
        self.target_names = vocab.target_names

        # optimizer
        self.optimizer = Optimizer(model.all_parameters)

        # count
        self.step = 0
        self.early_stop = -1
        self.best_train_f1, self.best_dev_f1 = 0, 0
        self.last_epoch = epochs
Example #29
    def start(self):
        train_loader, valid_loader = self.read_dataset()

        #create network
        self.NET = self.get_network()
        print("#net params:", sum(p.numel() for p in self.NET.parameters()))
        if self.USE_CUDA:
            self.NET.cuda()

        optimizer = Optimizer.get_optimizer(self.OPTIMIZER, self.NET,
                                            self.LEARNING_RATE)
        criterion = Loss.get_loss(self.LOSS)
        scheduler = ExponentialLR(optimizer, gamma=0.95)
        self.__train(train_loader, valid_loader, optimizer, criterion,
                     scheduler)
Example #30
def train():
    rval = load_data('e:/Data/MNist/mnist.pkl.gz')
    train_set_x, train_set_y = rval[0]
    valid_set_x, valid_set_y = rval[1]
    test_set_x, test_set_y = rval[2]
    train_set_y = expand(train_set_y)
    valid_set_y = expand(valid_set_y)
    test_set_y = expand(test_set_y)
    used_set_x = train_set_x # The set actually used for training
    used_set_y = train_set_y

    # Create the unitary model.
    model = Nnet(input_size=used_set_x.shape[1], output_size=used_set_y.shape[1],
                 activation=['softmax'], hidden_sizes=[])
    solver = Solver("logistic", model, l1_regularizer=0, l2_regularizer=0.0001)
    optimizer = Optimizer(solver, method='sag', read_type="random", stepsize=-1.0,
                          max_updates=50000, minibatch=50, display=500,
                          max_data=sys.maxsize)
    optimizer.train(used_set_x, used_set_y)

    # Create the pairwise dataset.
    randomOrder = np.random.permutation(used_set_x.shape[0])
    xRoll = used_set_x[randomOrder]
    yRoll = used_set_y[randomOrder]
    #pairData = np.hstack((used_set_x, xRoll, np.abs(used_set_x - xRoll)))
    pairData = np.abs(used_set_x - xRoll)
    pairLabel = np.sum(used_set_y * yRoll, axis = 1) # Ugly but it works.

    modelPair = Nnet(input_size=pairData.shape[1], output_size=1,
                     activation=['sigm'], hidden_sizes=[])
    solverPair = Solver("logistic", modelPair, l1_regularizer=0, l2_regularizer=0.0001)
    optimizerPair = Optimizer(solverPair, method='sag', read_type="random", stepsize=-1.0,
                              max_updates=50000, minibatch=50, display=500,
                              max_data=sys.maxsize)
    optimizerPair.train(pairData, pairLabel)

    # Predict on the train set.
    model.predict_batch(used_set_x)
    training_nll = np.mean(log_loss(model.output, used_set_y))
    training_classif_error = np.mean(classif_loss(model.output, used_set_y))
    print "Single train NLL = {}, Single train classif error = {}".format(training_nll,training_classif_error)

    model.predict_batch(valid_set_x)
    valid_nll = np.mean(log_loss(model.output, valid_set_y))
    valid_classif_error = np.mean(classif_loss(model.output, valid_set_y))
    print "Single valid NLL = {}, Single valid classif error = {}".format(valid_nll,valid_classif_error)

    # Create the joint model.
    pairwiseNNet = PairwiseNNet(model, modelPair)

    # Predict on the train set.
    pairwiseNNet.predict_batch(used_set_x)
    training_nll = np.mean(log_loss(pairwiseNNet.output, used_set_y))
    training_classif_error = np.mean(classif_loss(pairwiseNNet.output, used_set_y))
    print "Pair train NLL = {}, Pair train classif error = {}".format(training_nll,training_classif_error)

    pairwiseNNet.predict_batch(valid_set_x)
    valid_nll = np.mean(log_loss(pairwiseNNet.output, valid_set_y))
    valid_classif_error = np.mean(classif_loss(pairwiseNNet.output, valid_set_y))
    print "Pair valid NLL = {}, Pair valid classif error = {}".format(valid_nll,valid_classif_error)
Example #31
def optimize(model,
             data,
             itermax=50,
             k=1,
             act_fn=round):  # round rounds .5 to zero
    weights = [model.W]
    acc, res = model.test_acc(data, k=k, act_fn=act_fn)
    accuracies = [acc]
    ress = [res]
    for _ in range(itermax):
        # import pdb
        # pdb.set_trace()
        model.W = Optimizer.gradient_descent(model, data)
        weights.append(model.W)
        acc, res = model.test_acc(data, k=k, act_fn=act_fn)
        accuracies.append(acc)
        ress.append(res)
    return weights, accuracies, ress
Example #32
def compile_model(model: object, config: object) -> object:
    print('Finished building model')
    print('Compiling model...')
    # set up metrics and optimizer
    metrics = ['accuracy']
    optimizer = Optimizer(config.optimizer).optimizer
    # compile model
    if config.complexity == 'celeba':
        model.compile(loss='mean_squared_error',
                      optimizer=optimizer,
                      metrics=metrics)
    else:
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=metrics)
    print('Finished compiling')
    model.summary()
    return model
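
A hypothetical call site, assuming a config object that exposes the optimizer and complexity attributes read above (build_model is a placeholder, not a function from this codebase):

    model = compile_model(build_model(config), config)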
Example #33
class EdgeDetectModel:
    def __init__(self, inputs, targets, learning_rate, batch_size):
        self.batch_size = batch_size
        with tf.variable_scope("edge_detection"):
            self.output = self.edge_detection(inputs)
            self.cost = self.calculate_loss(self.output, targets)
            self.optimizer = Optimizer(learning_rate)
            self.train_op = self.optimizer.apply_gradients(self.cost)
        self.summary_op = tf.summary.merge(
            tf.get_collection(tf.GraphKeys.SUMMARIES, scope='edge_detection'))

    def edge_detection(self, enc_output):
        with tf.name_scope("output"):
            dense_cell = tf.layers.dense(inputs=enc_output,
                                         units=1,
                                         activation=tf.nn.tanh)
            edge_detect = tf.reduce_mean(dense_cell, axis=2)
        return edge_detect

    def mask_targets(self, targets, batch_size, as_float=False):
        # replace the first value with 0 no matter what
        if as_float:
            pad = 0.0
        else:
            pad = 0
        ending = tf.slice(targets, [0, 1], [-1, -1])
        masked_targets = tf.concat([tf.fill([batch_size, 1], pad), ending], 1)
        return masked_targets

    def calculate_loss(self, predictions, targets):
        with tf.name_scope("loss"):
            masked_predictions = self.mask_targets(predictions,
                                                   self.batch_size,
                                                   as_float=True)
            masked_targets = self.mask_targets(targets, self.batch_size)
            cost = tf.losses.mean_squared_error(masked_predictions,
                                                masked_targets)
            tf.summary.scalar("loss", cost)
        return cost
Example #34
    def train(self, loader):
        net = Network_AE(vocab_size=len(self.VOCAB), drop_out=self.DROP_OUT)
        if self.USE_CUDA:
            net.cuda()

        optimizer = Optimizer.get_optimizer(self.OPTIMIZER, net,
                                            self.LEARNING_RATE)
        criterion = Loss.get_loss(self.LOSS)

        #train
        plot_train_loss = []

        net.train()
        for epoch in range(1, self.EPOCH + 1):
            train_loss = 0.0
            for batch_idx, (X, y) in enumerate(loader):
                y = torch.squeeze(y)
                if self.USE_CUDA:
                    X = Variable(X.cuda())
                    y = Variable(y.cuda())
                else:
                    X = Variable(X)
                    y = Variable(y)

                output = net(X)
                loss = criterion(output, y)
                train_loss += loss.item()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            print("epoch:{}, train_loss:{}".format(epoch, train_loss))
            plot_train_loss.append(train_loss)

        #plot
        self.plot(plot_train_loss)
Example #36
mysql.execute('delete from dfs_backtesting;')
mysql.execute('delete from dfs_avg_preds;')
mysql.execute('''
    insert into dfs_avg_preds
    select 
        b.game_date, a.player_name, pred as DFS_pred,
        a.DFS_target,b.salary,position
    from dfs_avgs as a 
    left join dfs_salaries as b on a.t_date = b.game_date and a.player_name = b.full_name
    left join playerHeightAndPosition as pos on a.playoffyear = pos.playoff_year and a.team = pos.team and a.player_id = pos.player_id
    where position in ('C','SF','PG','PF','SG')
    having game_date is not null and dfs_target is not null and 
        salary is not null and position is not null
''')

optimizer = Optimizer(max_iterations=1000000)

def get_point_threshold(game_date):
    mysql.execute('''
    create temporary table match_count
    select game_date, count(*) as count from matches where game_date = '{game_date}';
    '''.format(game_date=game_date))
    mysql.execute('''
    select average from match_count left join FanDuel_avg_worst_winning_score as worst on match_count.count = worst.games 
    ''')
    avg = 0
    for row in mysql.fetchall():
        avg = row['average']
    mysql.execute('drop table match_count')    
    return avg
Example #37
 def __init__(self, log_level=0):
     '''Constructor'''
     Optimizer.__init__(self, log_level)