Example #1
def remove_not_needed_nodes(basis, current_weights, reply, target, capacity,
                            verbose):
    if verbose:
        print('Start removing not-needed nodes...')
    init_score = u.calculate_score(basis, current_weights)
    if init_score == 0:
        return basis
    if verbose:
        print('Initial score = ', init_score)
    if len(basis) == 0:
        return basis

    current_basis = basis.copy()
    flag_removed = 1
    while flag_removed:
        flag_removed = 0
        for b in current_basis:
            new_basis = current_basis.copy()
            new_basis.remove(b)
            confl = check_if_basis_has_no_conflicts(new_basis, reply, target,
                                                    capacity)
            if confl == 0:
                score = u.calculate_score(new_basis, current_weights)
                if score <= init_score:
                    current_basis = copy.copy(new_basis)
                    init_score = score
                    if verbose:
                        print(
                            'Remove node {} from basis. New score: {}'.format(
                                b, score))
                    flag_removed = 1
                    break
    return current_basis
Example #2
def solveSmall(G, s, t):
    # Remove node and delete 15 edges

    # location 0 corresponds to best to remove at first iteration
    bestEdgeToRemove = []
    newGraph = G.copy()
    bestC, bestK = smallHelper(newGraph, s, t, 15, [])
    maxScore = calculate_score(G, bestC, bestK)
    # print(maxScore)

    for i in range(1, 16):
        newGraph = G.copy()
        k = []
        for j in range(len(bestEdgeToRemove)):
            e = bestEdgeToRemove[j]
            newGraph.remove_edge(e[0], e[1])
            k.append((e[0], e[1]))
        e = EdgeToRemove(newGraph, s, t)

        if e is None:
            break
        bestEdgeToRemove.append(e)
        k.append((e[0], e[1]))
        c, k = smallHelper(newGraph, s, t, 15 - i, k)
        score = calculate_score(G, c, k)
        if score > maxScore:
            maxScore = score
            bestC, bestK = c, k
    return bestC, bestK
Example #3
def minimize_patch_weights_v1(scheme, etalon, weights, final_patch):

    current_basis = final_patch.__inputs__.copy()
    best_score = u.calculate_score(current_basis, weights)

    # Get the cone from the basis back to the inputs
    inp_cone = scheme.subscheme_by_outputs(current_basis)

    # Step 1: Go to F circuit inputs
    current_patch = copy.deepcopy(final_patch)
    while 1:
        # Try to check all possible variations
        local_basis = current_patch.__inputs__.copy()
        best_lscr = u.calculate_score(local_basis, weights)
        best_signature = -1
        if len(local_basis) < 18:
            for sig in range(0, 2**len(local_basis)):
                lscore, lbasis = get_basis_score_for_given_signature(
                    inp_cone, local_basis, sig, weights)
                if lscore < best_lscr:
                    # print(lscore, len(lbasis))
                    best_lscr = lscore
                    best_signature = sig
        else:
            print('Random replaces...')
            # too big basis try random replaces
            for i in range(2**12):
                sig = random.randint(0, 2**len(local_basis) - 1)
                lscore, lbasis = get_basis_score_for_given_signature(
                    inp_cone, local_basis, sig, weights)
                if lscore < best_lscr:
                    # print(lscore, len(lbasis))
                    best_lscr = lscore
                    best_signature = sig

        # We found something. Need to copy additional elements to current_patch
        if best_signature != -1:
            current_patch = add_elements_to_patch(inp_cone, current_patch,
                                                  best_signature)
            print('New patch score: {}'.format(best_lscr))
        else:
            break

    if 0:
        # Step 2. Check if some basis nodes are inputs for some element in inp_cone.
        bs = set(current_patch.__inputs__)
        print(bs)
        for el in inp_cone.__elements__:
            inp_nodes = set(inp_cone.__elements__[el][1])
            # print(inp_nodes)
            if inp_nodes.issubset(bs):
                print('Found subset: {}'.format(inp_nodes))

    current_patch = rename_patch_internal_nodes(current_patch)
    return current_patch
Example #4
    def eval(self, mode):
        self.model.eval()
        if mode == 'dev':
            data_eval = self.dev_data
        else:
            data_eval = self.test_data

        total_loss = 0.0
        true_labels = []
        pred_labels = []

        with torch.no_grad():
            for sentence, tags in data_eval:
                sentence_in = prepare_sequence(sentence).to(self.device)
                targets = torch.tensor(tags, dtype=torch.long).to(self.device)

                loss = self.model.neg_log_likelihood(sentence_in, targets)
                _, pred = self.model(sentence_in)
                total_loss += loss.item()

                true_labels.extend(tags)
                pred_labels.extend(pred)

        precision, recall, F1_score = calculate_score(
            true_labels, pred_labels, len(self.dataset.label2id))
        return total_loss / len(data_eval), precision, recall, F1_score
Example #5
def read_output_file(G, path):
    """
    Parses and validates an output file

    Args:
        G: input graph corresponding to input file
        path: str, path to output file
    Returns:
        score: the difference between the new and original shortest path
    """
    H = G.copy()
    if len(H) >= 20 and len(H) <= 30:
        max_cities = 2
        max_roads = 15
    elif len(H) > 30 and len(H) <= 50:
        max_cities = 3
        max_roads = 30
    elif len(H) > 50 and len(H) <= 100:
        max_cities = 5
        max_roads = 100
    else:
        # without this, max_cities/max_roads would be undefined below
        raise AssertionError('Input Graph is not of a valid size')

    assert H.has_node(0), 'Source vertex is missing in input graph'
    assert H.has_node(len(G) - 1), 'Target vertex is missing in input graph'

    cities = []
    removed_edges = []

    with open(path, "r") as fo:

        number_of_cities = fo.readline().strip()
        assert number_of_cities.isdigit(), 'Number of cities is not a digit'
        number_of_cities = int(number_of_cities)

        assert number_of_cities <= max_cities, 'Too many cities being removed from input graph'

        for _ in range(number_of_cities):
            city = fo.readline().strip()
            assert city.isdigit(), 'Specified vertex is not a digit'
            city = int(city)
            assert H.has_node(city), 'Specified vertex is not in input graph'
            cities.append(city)

        number_of_roads = fo.readline().strip()
        assert number_of_roads.isdigit(), 'Number of roads is not a digit'
        number_of_roads = int(number_of_roads)
        assert number_of_roads <= max_roads, 'Too many roads being removed from input graph'

        for _ in range(number_of_roads):
            road = fo.readline().split()
            assert len(road) == 2, 'An edge must be specified with a start and end vertex'
            assert road[0].isdigit() and road[1].isdigit()
            u = int(road[0])
            v = int(road[1])
            assert H.has_edge(u, v), 'Specified edge is not in input graph'
            removed_edges.append((u, v))

    return utils.calculate_score(G, cities, removed_edges)
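
The output format this parser accepts can be read off the asserts above: a count of removed cities, one city per line, a count of removed roads, then one "u v" pair per line. A minimal file that would pass these checks for a small graph (the vertex numbers are invented for illustration):

    2
    5
    17
    3
    0 5
    5 17
    17 29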
Example #6
    def find_scores(self, position):

        pos_x, pos_y = position
        positions = np.clip(
            np.array([(pos_x, pos_y), (pos_x - 1, pos_y), (pos_x + 1, pos_y),
                      (pos_x, pos_y - 1), (pos_x, pos_y + 1)]), 0, 10)

        scores = np.zeros((positions.shape[0], 1))

        for i, board_index in enumerate(positions):

            scores[i] = utils.calculate_score(board_index)

            total_neighbour_score = 0
            """
            already_encountered_start_pos = False
            for neighbor in utils.get_neighbour_indices(board_index):
                # Reaching end of the board
                if neighbor[0] == board_index[0] and neighbor[
                        1] == board_index[1]:
                    if already_encountered_start_pos:
                        neighbor_score = -1000 * 0.1
                    else:
                        already_encountered_start_pos = True
                        neighbor_score = utils.calculate_score(neighbor)
                # Free to roam
                else:
                    neighbor_score = utils.calculate_score(neighbor)
                total_neighbour_score += neighbor_score * 0.01
                """
            scores[i] += total_neighbour_score
        # print(scores)

        return scores, positions
Example #7
    def create(cls, video_key, video_createtime, score, conn=None, *args, **kwargs):
        conn = conn or cls.get_connection()
        now = datetime.now()
        _delta = now - datetime.utcfromtimestamp(video_createtime)
        hour = int(_delta.total_seconds() / 3600)  # age in hours
        real_score = calculate_score(score, hour, 2)
        conn.zadd(cls.table_name, real_score, video_key)
Example #8
    def train_epoch(self):
        self.model.train()
        train_loss = 0.0
        true_labels = []
        pred_labels = []

        for sentence, tags in self.train_data:
            self.optimizer.zero_grad()

            sentence_in = prepare_sequence(sentence).to(self.device)
            targets = torch.tensor(tags, dtype=torch.long,
                                   device=self.device)

            loss = self.model.neg_log_likelihood(sentence_in, targets)
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), 1)
            self.optimizer.step()

            _, pred = self.model(sentence_in)
            train_loss += loss.item()
            true_labels.extend(tags)
            pred_labels.extend(pred)

        precision, recall, F1_score = calculate_score(
            true_labels, pred_labels, len(self.dataset.label2id))
        # precision, recall, F1_score = calculate_score(torch.tensor(true_labels), torch.tensor(pred_labels), len(self.dataset.label2id))
        return train_loss / len(self.train_data), precision, recall, F1_score
Example #9
def endpoint_logic():
    """Validate API request, submit the data to logic API, and return probability of survival.
    Request:
        {
            "request_id": "16fd2706-8baf-433b-82eb-8c7fada847da",
            "logic_id": "MD_00001",
            "data": [
                {
                    "passenger_id": "A00001",
                    "sex": "male",
                    "sib_sp": 0,
                    "parch": 0,
                    "fare": 15.0,
                    "embarked": "S",
                    "p_class": "2"
                },{
                    "passenger_id": "A00002",
                    "sex": "female",
                    "sib_sp": 2,
                    "parch": 1,
                    "fare": 30.0,
                    "embarked": "S",
                    "p_class": "1"
                }
            ] 
        }

    Response:
        {
            "request_id": "16fd2706-8baf-433b-82eb-8c7fada847da",
            "logic_id": "MD_00001",
            "prediction": [
                {
                    "passenger_id": "A00001",
                    "score": 0.9132
                },{
                    "passenger_id": "A00002",
                    "score": 0.1251
                }
            ],
            "timestamp": "2010-04-20T20:08:21.634121"
        }
    """

    api_request = request.json
    if not is_request_valid(api_request):
        response = Response({}, status=400, mimetype="application/json")
        return response

    calculate_score_response = calculate_score(
        path=logic_config[api_request["logic_id"]]["endpoint"],
        data=request.json)

    response = Response(json.dumps(calculate_score_response),
                        status=200,
                        mimetype="application/json")
    return response
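
For context, a call against this handler could look like the sketch below; the host, port, and route are assumptions, since the snippet shows only the handler body.

import requests  # hypothetical client; the URL and route are assumed, not taken from the source

payload = {
    "request_id": "16fd2706-8baf-433b-82eb-8c7fada847da",
    "logic_id": "MD_00001",
    "data": [{"passenger_id": "A00001", "sex": "male", "sib_sp": 0,
              "parch": 0, "fare": 15.0, "embarked": "S", "p_class": "2"}],
}
resp = requests.post("http://localhost:5000/score", json=payload)
print(resp.status_code, resp.json())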
Example #10
    def get_score(self, pos, time):
        """
        >>> w = CircularWorld('../test/')
        >>> w.get_score(np.array([2.1,2.9]), 0)
        0.0
        """

        return utils.calculate_score(pos, self.centers[time],
                                     self.center_radius, self.pos_limits)
Example #11
    def test_calculate_score(self):
        world = parse('input_files/example.in')
        print(world)

        # sample solution
        world['rows'] = [[UNAVAILABLE, 0, 0, 0, 3], [1, 1, 1, 2, 2]]
        world['pools'] = [[0, 2], [1, 3]]
        self.assertEqual(calculate_score(world, None), 5,
                         'the test failed!')
Example #12
def get_best_patch_by_weights(patches_list, weights):
    if len(patches_list) == 0:
        return []
    scores_list = []
    for p in patches_list:
        score = u.calculate_score(p.__inputs__, weights)
        scores_list.append(score)
    indexes = sorted(range(len(scores_list)), key=lambda k: scores_list[k])
    current_best = patches_list[indexes[0]]
    return current_best
Example #13
def minimize_patch_weights_v2(scheme, etalon, weights, final_patch):
    current_basis = final_patch.__inputs__.copy()

    # Get the cone from the basis back to the inputs
    inp_cone = scheme.subscheme_by_outputs(current_basis)
    current_patch = merge_circuits(scheme, inp_cone, final_patch)
    score = u.calculate_score(current_patch.__inputs__, weights)
    print('Input cone basis length: {} Weight: {}'.format(
        len(current_patch.__inputs__), score))
    current_patch = rename_patch_internal_nodes(current_patch)
    return current_patch
Example #14
def get_runs():
    rows = []
    conn = engine_runs.connect()
    # TODO prices/actions in separate route
    for row in conn.execute('select id, hypers, advantage_avg, advantages, uniques from runs where advantage_avg > 0').fetchall():
        row = dict(row.items())
        row['advantage_avg'] = utils.calculate_score(row['advantages'])
        rows.append(row)
    conn.close()

    print(len(rows), 'rows')
    return jsonify(rows)
Example #15
def single_file(lol):
    path = 'inputs/small/small-' + str(lol) + '.in'
    # path = 'test.in'
    # path = 'inputs/medium/medium-' + str(lol) +  '.in'
    # path = 'inputs/large/large-' + str(lol) +  '.in'
    G = read_input_file(path)
    print(G.nodes)
    # c, k = solve2(G)
    c, k = solve2(G)
    assert is_valid_solution(G, c, k)
    print("Shortest Path Difference: {}".format(calculate_score(G, c, k)))
    write_output_file(G, c, k, 'small-test.out')
Example #16
def get_runs():
    rows = []
    conn = engine_runs.connect()
    # TODO prices/actions in separate route
    for row in conn.execute(
            'select id, hypers, advantage_avg, advantages, uniques from runs'
    ).fetchall():
        row = dict(row.items())
        row['advantage_avg'] = utils.calculate_score(row)
        rows.append(row)
    conn.close()

    print(len(rows), 'rows')
    return jsonify(rows)
Example #17
    def evaluate(self):
        self.model.eval()
        print(len(self.val_dl))

        with torch.no_grad():

            all_outputs = list()
            all_targets = list()

            for batch in tqdm(self.val_dl, total=len(self.val_dl),
                              leave=False):
                data, target = batch
                data = data.to(self.device)
                target = target.to(self.device)
                outputs = self.model(data)
                loss = self.loss_func(outputs, target)
                if isinstance(outputs, tuple) and len(outputs) > 1:
                    # average predictions across the multiple heads
                    _output = outputs[0]
                    for output in outputs[1:]:
                        _output = _output + output
                    outputs = _output / len(outputs)
                target = target.data.cpu()
                outputs = outputs.data.cpu()

                self.val_loss_avg.update(loss.cpu().item())

                all_outputs.append(outputs)
                all_targets.append(target)

            all_outputs = torch.cat(all_outputs, 0)
            all_targets = torch.cat(all_targets, 0)

        val_f1, val_acc = calculate_score(self.cfg, all_outputs, all_targets)

        self.logger.info('Validation Result:')

        self.logger.info('VAL_LOSS: %s, VAL_ACC: %s VAL_F1: %s \n' %
                         (self.val_loss_avg.avg, val_acc, val_f1))

        self.logger.info('-' * 20)

        if self.summary_writer:

            self.summary_writer.add_scalar('Valid/loss', self.val_loss_avg.avg,
                                           self.train_epoch)
            self.summary_writer.add_scalar('Valid/acc', np.mean(val_acc),
                                           self.train_epoch)
            self.summary_writer.add_scalar('Valid/f1', np.mean(val_f1),
                                           self.train_epoch)
Example #18
    def loss_fn(hypers):
        processed = post_process(hypers)
        network = network_spec(processed['custom'])

        agent = processed['ppo_agent']
        ## GPU split
        gpu_split = args.gpu_split
        if gpu_split != 1:
            fraction = .9 / gpu_split if gpu_split > 1 else gpu_split
            session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=fraction))
            agent['execution'] = {'type': 'single', 'session_config': session_config, 'distributed_spec': None}

        pprint(processed)
        pprint(network)

        env = BitcoinEnv(processed, args)
        agent = agents_dict['ppo_agent'](
            states=env.states,
            actions=env.actions,
            network=network,
            **agent
        )

        env.train_and_test(agent)

        acc = env.acc.test
        adv_avg = utils.calculate_score(acc.ep.returns)
        print(hypers, f"\nScore={adv_avg}\n\n")

        df = pd.DataFrame([dict(
            id=uuid.uuid4(),
            hypers=json.dumps(hypers),
            returns=list(acc.ep.returns),
            uniques=list(acc.ep.uniques),
            prices=list(env.data.get_prices(acc.ep.i, 0)),
            signals=list(acc.step.signals),
        )]).set_index('id')
        dtype = {
            'hypers': psql.JSONB,
            **{k: psql.ARRAY(psql.DOUBLE_PRECISION) for k in ['returns', 'signals', 'prices', 'uniques']},
        }
        with data.engine_runs.connect() as conn:
            df.to_sql('runs', conn, if_exists='append', index_label='id', dtype=dtype)

        # TODO restore save_model() from git

        agent.close()
        env.close()
        return -adv_avg  # maximize
Example #19
def process_file(solve, input_file):
    print("processing %s" % (input_file, ))
    output_file = get_output_file(input_file)

    world = parse(input_file=os.path.join('./input_files', input_file))
    analyze_world(world)
    t0 = time.time()
    solution = solve(world)
    t1 = time.time()
    print("solution took %.1f sec" % (t1 - t0, ))
    score = calculate_score(world, solution)
    t2 = time.time()
    print("calculate score took %.1f sec" % (t2 - t1, ))
    print("SCORE: %d" % score)
    write(solution, output_file)
    return score
Example #20
    def queryArticlesByHot(self, pagesize=32, page=1, user_id=None):
        try:
            query = self.session.query(Article).all()
            now = time.time()
            for row in query:
                if user_id is not None and self.isFaved(article_id=row.id,
                                                        user_id=user_id):
                    row.faved = True
                else:
                    row.faved = False
                delta_hours = int(
                    (decimal.Decimal(now) - row.timestamp) / 3600)
                hot = calculate_score(row.score, delta_hours)
                row.hot = hot
            query = sorted(query, reverse=True)

            for index, row in enumerate(query):
                row.rowid = index + 1
                row.shortURL = formatURL(row.URL)

            totalpage = int(math.ceil(len(query) / float(pagesize)))
            nextpagenum = 1
            privouspagenum = 1
            if totalpage > 1:
                if page >= totalpage:
                    hasnextpage = False
                    hasprivouspage = True
                    privouspagenum = totalpage - 1
                elif page <= 1:
                    hasnextpage = True
                    hasprivouspage = False
                    nextpagenum = 2
                else:
                    hasnextpage = True
                    hasprivouspage = True
                    privouspagenum = page - 1
                    nextpagenum = page + 1
            else:
                hasnextpage = False
                hasprivouspage = False

            query = query[pagesize * (page - 1):pagesize * page]

            return query, hasnextpage, nextpagenum, hasprivouspage, privouspagenum
        except Exception:
            self.session.rollback()
Example #22
def minimax(curr_node, player):
    score_list = []

    score = utils.calculate_score(curr_node.get_board(), player)

    if score in [math.inf, -math.inf]:
        curr_node.set_score(score)
        print(curr_node.get_board())
        return score

    if curr_node.get_depth() == 2:
        print(curr_node.get_board())
        curr_node.set_score(score)
        return score

    moves = utils.get_available_moves(curr_node.get_board())

    if moves == []:
        print(curr_node.get_board())
        curr_node.set_score(score)
        return score

    for move in moves:
        new_board = utils.perform_move(copy.deepcopy(curr_node.get_board()),
                                       move, player)
        new_node = node.Node(curr_node, new_board,
                             curr_node.get_depth() + 1, move)
        curr_node.add_lower(new_node)

        if player == 1:
            move_score = minimax(new_node, 2)
        else:
            move_score = minimax(new_node, 1)

        score_list.append(move_score)

    if player == 1:
        score = max(score_list)
        curr_node.set_score(score)
    else:
        score = min(score_list)
        curr_node.set_score(score)

    return score
Example #23
    def execute(self, actions):
        flat, hydrated, network = self.get_hypers(actions)

        env = BitcoinEnv(flat, name=self.agent)
        agent = agents_dict[self.agent](
            states_spec=env.states,
            actions_spec=env.actions,
            network_spec=network,
            **hydrated
        )

        env.train_and_test(agent, self.cli_args.n_steps, self.cli_args.n_tests, -1)

        step_acc, ep_acc = env.acc.step, env.acc.episode
        adv_avg = utils.calculate_score(ep_acc.advantages)
        print(flat, f"\nAdvantage={adv_avg}\n\n")

        sql = """
          insert into runs (hypers, advantage_avg, advantages, uniques, prices, actions, agent, flag) 
          values (:hypers, :advantage_avg, :advantages, :uniques, :prices, :actions, :agent, :flag)
          returning id;
        """
        row = self.conn_runs.execute(
            text(sql),
            hypers=json.dumps(flat),
            advantage_avg=adv_avg,
            advantages=list(ep_acc.advantages),
            uniques=list(ep_acc.uniques),
            prices=list(env.prices),
            actions=list(step_acc.signals),
            agent=self.agent,
            flag=self.cli_args.net_type
        ).fetchone()

        if ep_acc.advantages[-1] > 0:
            _id = str(row[0])
            directory = os.path.join(os.getcwd(), "saves", _id)
            filestar = os.path.join(directory, _id)
            os.mkdir(directory)
            agent.save_model(filestar)

        agent.close()
        env.close()
        return adv_avg
Example #24
    def execute(self, actions):
        flat, hydrated, network = self.get_hypers(actions)

        env = BitcoinEnv(flat, self.cli_args)
        agent = agents_dict[self.agent](
            states=env.states,
            actions=env.actions,
            network=network,
            **hydrated
        )

        env.train_and_test(agent, self.cli_args.n_steps, self.cli_args.n_tests, -1)

        step_acc, ep_acc = env.acc.step, env.acc.episode
        adv_avg = utils.calculate_score(ep_acc.returns)
        print(flat, f"\nScore={adv_avg}\n\n")

        sql = """
          insert into runs (hypers, sharpes, returns, uniques, prices, signals, agent, flag)
          values (:hypers, :sharpes, :returns, :uniques, :prices, :signals, :agent, :flag)
          returning id;
        """
        row = self.conn_runs.execute(
            text(sql),
            hypers=json.dumps(flat),
            sharpes=list(ep_acc.sharpes),
            returns=list(ep_acc.returns),
            uniques=list(ep_acc.uniques),
            prices=list(env.prices),
            signals=list(step_acc.signals),
            agent=self.agent,
            flag=self.cli_args.net_type
        ).fetchone()

        if ep_acc.returns[-1] > 0:
            _id = str(row[0])
            directory = os.path.join(os.getcwd(), "saves", _id)
            filestar = os.path.join(directory, _id)
            os.makedirs(directory, exist_ok=True)
            agent.save_model(filestar)

        agent.close()
        env.close()
        return adv_avg
Example #25
    def get_score(self, pos, time):
        """
        >>> w = RectangularWorld('../test/')
        >>> w.get_score(np.array([2.1,2.9]), 0)
        0.0
        """

        if self.full_noise_file:
            return utils.get_score(pos, time, self.noise_location,
                                   self.pos_limits, self.noise_line_width)
        else:
            if self.edge_goal:
                return utils.wall_score(pos, self.centers[time],
                                        self.center_radius, self.pos_limits,
                                        self.shape)
            else:
                return utils.calculate_score(pos, self.centers[time],
                                             self.center_radius,
                                             self.pos_limits, self.shape)
Example #26
def solve(G):
    """
    Args:
        G: networkx.Graph
    Returns:
        c: list of cities to remove
        k: list of edges to remove
    """
    random.seed()  # seeds from system time; passing a datetime object is not supported on newer Pythons
    iterations = 0
    best_c, best_k = [], []
    if len(list(G.nodes)) <= 30:
        iterations = 500
    elif len(list(G.nodes)) <= 50:
        iterations = 200
    elif len(list(G.nodes)) <= 100:
        iterations = 15
    first_time, best_score, is_medium_or_large = True, 0.0, False
    for i in range(iterations):
        m = 0
        t = len(list(G.nodes)) - 1
        if len(list(G.nodes)) <= 30:
            max_k, max_c, m = 15, 1, 100
        elif len(list(G.nodes)) <= 50:
            max_k, max_c, m = 50, 3, 100
            is_medium_or_large = True
        elif len(list(G.nodes)) <= 100:
            max_k, max_c, m = 100, 5, 500
            is_medium_or_large = True
        else:
            max_k, max_c = 0, 0
        c, k, m, max_c, max_k, first_time = helper(G, m, t, max_c, max_k,
                                                   first_time,
                                                   is_medium_or_large)
        first_time = False
        if is_valid_solution(G, c, k, t):
            curr_score = calculate_score(G, c, k, t)
            if curr_score > best_score:
                best_score = curr_score
                best_c, best_k = c, k
    print("END")
    return best_c, best_k
Example #27
def stories(page=1, limit=25, story_type=None, over_filter=0):
    now = timezone.now()
    stories = Stories.objects.all()
    # Only show the last two weeks
    enddate = datetime.datetime.now(tz)
    startdate = enddate - datetime.timedelta(days=14)
    stories = stories.filter(time__range=[startdate, enddate])
    if story_type:
        if story_type == 'best':
            stories = stories.order_by('-score')
        elif story_type == 'newest':
            stories = stories.order_by('-time')
        elif story_type == 'self':
            stories = stories.filter(selfpost=True)
        elif story_type == 'show':
            stories = stories.filter(title__startswith='Show HN')
        elif story_type == 'ask':
            stories = stories.filter(selfpost=True, title__startswith='Ask HN')
        elif story_type == 'poll':
            stories = stories.filter(poll=True)
        else:
            stories = stories.filter(story_type=story_type)
    if over_filter > 0:
        stories = stories.filter(score__gte=over_filter)
    if story_type not in ['newest', 'best']:
        # HN Sorting
        sorted_stories = []
        for story in stories:
            time_hours = (now - story.time).total_seconds() / 3600
            score = utils.calculate_score(story.score, time_hours)
            sorted_stories.append({'story': story, 'score': score})
        sorted_stories = sorted(sorted_stories,
                                key=lambda story: story['score'],
                                reverse=True)
        stories = [story['story'] for story in sorted_stories]
    paginator = Paginator(stories, limit)
    try:
        stories = paginator.page(page)
    except (InvalidPage, EmptyPage):
        stories = paginator.page(paginator.num_pages)
    return stories
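
None of these snippets show utils.calculate_score itself, but it takes a raw vote score and an age in hours (Example #7 above passes 2 as a third argument, consistent with a gravity exponent). A plausible sketch, assuming the classic Hacker News gravity formula rather than the project's actual code:

def calculate_score(score, age_hours, gravity=1.8):
    # assumed HN-style gravity ranking: value decays polynomially with age
    return (score - 1) / pow(age_hours + 2, gravity)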
Example #28
def get_basis_score_for_given_signature(cir, basis, sig, weights):
    basis_new = []
    for i in range(len(basis)):
        val = (sig >> i) & 1
        if val == 0:
            basis_new.append(basis[i])
        else:
            if basis[i] in cir.__elements__:
                flag_ok = 1
                for ael in cir.__elements__[basis[i]][1]:
                    if ael not in weights:
                        flag_ok = 0
                        break
                if flag_ok:
                    basis_new += cir.__elements__[basis[i]][1]
                else:
                    basis_new.append(basis[i])
            else:
                basis_new.append(basis[i])
    scr = u.calculate_score(basis_new, weights)
    return scr, basis_new
Example #30
def remove_c(G, c, V):
    ret = []
    nodes = list(G.nodes())
    nodes.remove(0)
    nodes.remove(V - 1)
    #ret has node
    while len(ret) < c:
        temp = -1
        curr = -1
        for j in nodes:
            H = G.copy()
            H.remove_node(j)
            if nx.is_connected(H):
                score = calculate_score(G, [j], [])
                if score > curr:
                    temp = j
                    curr = score
        if len(nodes) == 0 or temp == -1:
            return ret
        #print(temp)
        nodes.remove(temp)
        if is_valid_solution(G, ret + [temp], []):
            ret.append(temp)
    return ret
Example #31
    overall_improvements = 0
    inputs = glob.glob('inputs/*')
    for input_path in inputs:   # Iterate through folders in inputs
        files = glob.glob(input_path + "/*")
        for file_path in files:  # Iterates through every file in every folder

            print("Begin processing {}".format(file_path))
            G = read_input_file(file_path)  # Reads in the next graph

            size = input_path[7:]
            v, e = solve(G, size)  # Calculates the list of vertices (v) and edges (e) to remove

            output_path = 'outputs/' + file_path[7:][:-3] + '.out'
            currBest_distance = read_output_file(G, output_path)
            #currBest_distance = -1 # DEBUG
            this_distance = calculate_score(G, v, e)

            if currBest_distance >= this_distance:
                print("Current output is better or equal to this output. No output file written.")
            else:
                overall_improvements += 1
                print("Output distance IMPROVED by: " + str(this_distance - currBest_distance))
                print("NEW shortest path is length: " + str(this_distance))
                write_output_file(G, v, e, output_path)

    print("TOTAL OUTPUTS IMPROVED: " + str(overall_improvements))


# Here's an example of how to run your solver.

# Usage: python3 solver.py test.in
Example #32
    print(G.nodes)
    # c, k = solve2(G)
    c, k = solve2(G)
    assert is_valid_solution(G, c, k)
    print("Shortest Path Difference: {}".format(calculate_score(G, c, k)))
    write_output_file(G, c, k, 'small-test.out')


if __name__ == '__main__':
    # single_file(62)
    for i in range(1, 301):
        print("INPUT: ", i)
        path = 'inputs/medium/medium-' + str(i) + '.in'
        G = read_input_file(path)
        c, k = solve2(G)
        assert is_valid_solution(G, c, k)
        print("Shortest Path Difference: {}".format(calculate_score(G, c, k)))
        output_path = 'outputs/medium-' + str(i) + '.out'
        write_output_file(G, c, k, output_path)

# For testing a folder of inputs to create a folder of outputs, you can use glob (need to import it)
# if __name__ == '__main__':
#     inputs = glob.glob('inputs/*')
#     for input_path in inputs:
#         output_path = 'outputs/' + basename(normpath(input_path))[:-3] + '.out'
#         G = read_input_file(input_path)
#         c, k = solve(G)
#         assert is_valid_solution(G, c, k)
#         distance = calculate_score(G, c, k)
#         write_output_file(G, c, k, output_path)
Example #33
def main():
    import gp
    from sklearn.feature_extraction import DictVectorizer

    parser = argparse.ArgumentParser()
    parser.add_argument('--guess', type=int, default=-1, help="Run the hard-coded 'guess' values first before exploring")
    parser.add_argument('--boost', action="store_true", default=False, help="Use custom gradient-boosting optimization, or bayesian optimization?")
    utils.add_common_args(parser)
    args = parser.parse_args()

    # Encode features
    hsearch = HSearchEnv(cli_args=args)
    hypers_, hardcoded = hsearch.hypers, hsearch.hardcoded
    hypers_ = {k: v for k, v in hypers_.items() if k not in hardcoded}
    hsearch.close()

    # Build a matrix of features,  length = max feature size
    max_num_vals = 0
    for v in hypers_.values():
        l = len(v['vals'])
        if l > max_num_vals: max_num_vals = l
    empty_obj = {k: None for k in hypers_}
    mat = pd.DataFrame([empty_obj.copy() for _ in range(max_num_vals)])
    for k, hyper in hypers_.items():
        for i, v in enumerate(hyper['vals']):
            mat.loc[i,k] = v
    mat.ffill(inplace=True)

    # Above is Pandas-friendly stuff, now convert to sklearn-friendly & pipe through OneHotEncoder
    vectorizer = DictVectorizer()
    vectorizer.fit(mat.T.to_dict().values())
    feat_names = vectorizer.get_feature_names()

    # Map TensorForce actions to GP-compatible `domain`
    # instantiate just to get actions (get them from hypers above?)
    bounds = []
    for k in feat_names:
        hyper = hypers_.get(k, False)
        if hyper:
            bounded = hyper['type'] == 'bounded'
            b = [min(hyper['vals']), max(hyper['vals'])] if bounded else [0, 1]
        else:
            # one-hot columns produced by DictVectorizer get a [0, 1] range
            b = [0, 1]
        bounds.append(b)

    def hypers2vec(obj):
        h = dict()
        for k, v in obj.items():
            if k in hardcoded: continue
            if type(v) == bool: h[k] = float(v)
            else: h[k] = v or 0.
        return vectorizer.transform(h).toarray()[0]

    def vec2hypers(vec):
        # Reverse the encoding
        # https://stackoverflow.com/questions/22548731/how-to-reverse-sklearn-onehotencoder-transform-to-recover-original-data
        # https://github.com/scikit-learn/scikit-learn/issues/4414
        reversed = vectorizer.inverse_transform([vec])[0]
        obj = {}
        for k, v in reversed.items():
            if '=' not in k:
                obj[k] = v
                continue
            if k in obj: continue  # we already handled this x=y logic (below)
            # Find the winner (max) option for this key
            score, attr, val = v, k.split('=')[0], k.split('=')[1]
            for k2, score2 in reversed.items():
                if k2.startswith(attr + '=') and score2 > score:
                    score, val = score2, k2.split('=')[1]
            obj[attr] = val

        # Bools come in as floats. Also, if the result is False they don't come in at all! So we iterate
        # over hypers here instead of nesting this logic in the reversed-iteration above
        for k, v in hypers_.items():
            if v['type'] == 'bool':
                obj[k] = bool(round(obj.get(k, 0.)))
        return obj
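
    # Worked example (hypothetical values): inverse_transform can yield
    # {'net_type=conv': 0.8, 'net_type=lstm': 0.2, 'lr': 0.001}; the
    # max-option loop above collapses that to {'net_type': 'conv', 'lr': 0.001}.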

    # Specify the "loss" function (which we'll maximize) as a single rl_hsearch instantiate-and-run
    def loss_fn(params):
        hsearch = HSearchEnv(cli_args=args)
        reward = hsearch.execute(vec2hypers(params))
        hsearch.close()
        return [reward]

    guess_i = 0
    while True:
        # Every iteration, re-fetch from the database & pre-train new model. Acts same as saving/loading a model to disk,
        # but this allows to distribute across servers easily
        conn_runs = data.engine_runs.connect()
        sql = "select hypers, advantages, advantage_avg from runs where flag=:f"
        runs = conn_runs.execute(text(sql), f=args.net_type).fetchall()
        conn_runs.close()
        X, Y = [], []
        for run in runs:
            X.append(hypers2vec(run.hypers))
            Y.append([utils.calculate_score(run.advantages)])
        boost_model = print_feature_importances(X, Y, feat_names)

        if args.guess != -1:
            guess = {k: v['guess'] for k, v in hypers_.items()}
            guess.update(utils.guess_overrides[args.guess][guess_i])
            loss_fn(hypers2vec(guess))

            guess_i += 1
            if guess_i > len(utils.guess_overrides[args.guess])-1:
                args.guess = -1  # start on GP

            continue

        if args.boost:
            print('Using gradient-boosting')
            boost_optimization(
                model=boost_model,
                loss_fn=loss_fn,
                bounds=np.array(bounds),
                x_list=X,
                y_list=Y
            )
        else:
            # Evidently duplicate values break GP. Many of these are ints, so they're definite duplicates. Either way,
            # tack on some small epsilon to make them different (1e-6 < gp.py's min threshold, so make sure that's not a
            # problem). I'm concerned about this since many hypers can go below that epsilon (eg learning-rate).
            for x in X:
                for i, v in enumerate(x):
                    x[i] += np.random.random() * 1e-6
            gp.bayesian_optimisation2(
                loss_fn=loss_fn,
                bounds=np.array(bounds),
                x_list=X,
                y_list=Y
            )
Example #34
# if __name__ == '__main__':
#     assert len(sys.argv) == 2
#     path = sys.argv[1]
#     G = read_input_file(path)
#     H = G.copy()
#     c, k = solve2(H)
#     assert is_valid_solution(G, c, k)
#     print("Shortest Path Difference: {}".format(calculate_score(G, c, k)))
#     write_output_file(G, c, k, 'outputs/test.out')
# -----------------------------

# -----------------------------
# Run for folder tests -- FOLDER TESTER --

# For testing a folder of inputs to create a folder of outputs, you can use glob (need to import it)
if __name__ == '__main__':
    inputs = glob.glob('inputs/inputs/small/*')
    distances = []
    for input_path in inputs:
        output_path = 'outputs/small/' + basename(normpath(input_path))[:-3] + '.out'
        G = read_input_file(input_path)
        H = G.copy()
        c, k = solve2(H)
        assert is_valid_solution(G, c, k)
        distances.append((basename(normpath(input_path))[:-3], calculate_score(G, c, k)))
        write_output_file(G, c, k, output_path)
    with open('outputs/distances_large.txt', "w") as fo:
        for d in distances:
            fo.write(d[0] + " " + str(d[1]) + "\n")
# -----------------------------
Example #35
def search4bases(weights, reply, target_vector_int, cap, mit, verbose):
    bases = []
    scores = []
    if cap == 0:
        return [[]]

    #if mit == 0:
    # backward greedy
    #if verbose:
    #    print('\n\n=====================backward greedy=============================')
    #basis = gs.backward_greedy_search(weights, reply.copy(), target_vector_int, cap, verbose)
    #basis = gs.remove_not_needed_nodes(basis, weights, reply, target_vector_int, cap, verbose)
    #if len(basis) < 19:
    #    basis, scors = gs.multi_replacer(basis, weights, reply, target_vector_int, cap, 3, verbose)
    #    scores += scors
    #    bases += basis

    if mit == 0:
        if verbose:
            print(
                '\n\n=====================absolute greedy============================='
            )
        # forward greedy absolute
        basis = gs.greedy_search(weights, reply.copy(), target_vector_int, cap,
                                 'absolute', verbose)
        basis = gs.remove_not_needed_nodes(basis, weights, reply,
                                           target_vector_int, cap, verbose)
        if len(basis) < 19:
            if len(basis) < 15:
                basis, scors = gs.multi_replacer(basis, weights, reply,
                                                 target_vector_int, cap, 1,
                                                 verbose)
            else:
                scors = [u.calculate_score(basis, weights)]
                basis = [basis]
            scores += scors
            bases += basis

    if verbose:
        print(
            '\n\n=====================weighted greedy============================='
        )
    # forward greedy weighted
    basis = gs.greedy_search(weights, reply.copy(), target_vector_int, cap,
                             'weighted', verbose)
    basis = gs.remove_not_needed_nodes(basis, weights, reply,
                                       target_vector_int, cap, verbose)
    if len(basis) < 19:
        if len(basis) < 15:
            basis, scors = gs.multi_replacer(basis, weights, reply,
                                             target_vector_int, cap, 1,
                                             verbose)
        else:
            scors = [u.calculate_score(basis, weights)]
            basis = [basis]
        scores += scors
        bases += basis
    # drop failed searches and sort bases by score, keeping score/basis pairs aligned
    paired = [(s, b) for s, b in zip(scores, bases) if s is not None and b is not None]
    paired.sort(key=lambda t: t[0])
    bases = [b for _, b in paired]
    print('Bases list:', bases)
    return bases
Example #36
def ic(F, G, weights, patch_file, out_file, time_limit=1000000):
    Bk = '\033[0m'  # normal
    Rd = '\033[31m'  # red
    Wh = '\033[37m'  # white
    Bl = '\033[34m'  # blue

    start = time.time()
    sys.setrecursionlimit(20000)
    tgts, scheme = rw.read_verilog(F)
    _, etalon = rw.read_verilog(G)
    weights = rw.read_weights(weights)
    dep_outs, sign_inps = u.tgt_influence(scheme, etalon, tgts)
    outs_to_process = [
        item for sublist in list(dep_outs.values()) for item in sublist
    ]
    eq_outs = [out for out in scheme.__outputs__ if out not in outs_to_process]
    sys.stdout.write(Wh + 'Check the rest of outputs on equivalence... ')
    sys.stdout.flush()
    if not eq.check_clean_outputs(F, G, eq_outs):
        print(Rd + 'ERROR: impossible to create patch')
        sys.exit()
    print('ok')

    print(Bk +
          '================================================================')
    print(Rd +
          '                    Initial targets split                       ')
    print(Bk +
          '================================================================')

    print(Bk + str(tgts) + ' ' + str(dep_outs))
    independent_targets = get_fully_independent_targets(dep_outs)
    print('Independent target groups: {} Targets split: {}'.format(
        len(independent_targets), independent_targets))

    if (len(independent_targets) == 1
            and DONT_CUT_CIRCUIT_FOR_SINGLE_TARGET_BLOCK):
        final_patch = get_patch_for_independent_target_list(
            tgts, scheme, etalon, weights, time_limit)
    else:
        all_patches = dict()
        for target_part in sorted(independent_targets):
            print(
                'Run independent patch search for targets: {} and outputs: {}'.
                format(target_part, independent_targets[target_part]))
            new_scheme, new_etalon = create_subpart_for_outputs_v1(
                scheme, etalon, tgts, target_part,
                independent_targets[target_part])
            # new_scheme, new_etalon, new_weights = create_subpart_for_outputs_v2(scheme, etalon, weights, tgts, target_part, independent_targets[target_part])
            # new_scheme.print_verilog_in_file('sch_VCC.v', 'VCC')
            independent_patch = get_patch_for_independent_target_list(
                list(target_part), new_scheme, new_etalon, weights, time_limit)
            all_patches[target_part] = [copy.deepcopy(independent_patch)]
        final_patch = u.patch_merger(all_patches)

    if final_patch is None:
        return 0, 0, 0, time.time() - start
    # Minimization
    print(Bk +
          '================================================================')
    print(Rd +
          '                     Patch minimization                         ')
    print(Bk +
          '================================================================')

    final_patch = minimize_patch_weights(scheme, etalon, weights, final_patch)

    score = u.calculate_score(final_patch.__inputs__, weights)
    # write in output directory
    final_patch.print_verilog_in_file(patch_file, 'patch')
    print('Patch size before elements minimizer:', final_patch.elements())
    # minimizing patch
    rw.minimize_patch_abc(patch_file)
    _, final_patch = rw.read_verilog(patch_file)

    print(Rd + 'BASIS: ')
    print(Bk + ' Score:', score)
    print(' Patch size:', final_patch.elements())
    print(' Number of nodes:', len(final_patch.__inputs__))

    # generate the patched file
    rw.generate_out_verilog(F, final_patch.__outputs__, final_patch.__inputs__,
                            out_file)

    # final verification
    eq.patch_circuit(out_file, patch_file, G)
    print(Rd + 'TIMING: ')
    timing = time.time() - start
    print(Bk, timing, ' seconds')

    # check for equivalence
    print(Rd + 'EQUIVALENCE: ')
    eql = eq.equivalence_check_abc()
    if eql == 1:
        print(Bk + '  SUCCESS')
    else:
        print(Bk + '  FAIL')

    return eql, score, final_patch.elements(), int(timing)
Example #37
def guess_number():
    secret = xor(int(request.form.get('encrypted')))
    guess = int(request.form.get('guess'))
    return jsonify(calculate_score(secret, guess))