Example #1
    def run_experiment(self):
        '''
        Run the specified experiments.
        :return: dict with metrics for the last trained model
        '''
        pre = Preprocessing()
        print('Reading Data')
        train_df = DataSource().read_data(etapa_treino=True)
        test_df, y_test = DataSource().read_data(etapa_treino=False)
        y_test = y_test['SalePrice']
        print('Preprocessing Data')
        X_train, y_train = pre.process(train_df, etapa_treino=True)
        print('Processing Test Data')
        X_test = pre.process(test_df[pre.train_features], etapa_treino=False)
        print('Training Model')
        models = Experiments().train_model(X_train, y_train)
        print('Running Metrics')
        for name, model in models.items():
            print(name)
            y_pred = model.predict(X_test)
            # Compute the metrics once, then print and persist them
            metrics = Metrics().calculate_regression(y_test, pd.Series(y_pred))
            print(metrics)
            pd.DataFrame.from_dict(
                metrics, orient='index').to_csv('../output/' + name + '.csv')
        return metrics
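Example #1's calculate_regression returns a dict of metric names to values, which is what lands in each per-model CSV. A minimal sketch of such a helper (the metric names and choices here are assumptions, not the project's actual code):

import numpy as np

class Metrics:
    def calculate_regression(self, y_true, y_pred):
        # Hypothetical metric set; y_true and y_pred are pandas Series.
        # Using .values sidesteps index alignment between the two Series.
        error = y_true.values - y_pred.values
        return {
            'mae': float(np.mean(np.abs(error))),         # mean absolute error
            'rmse': float(np.sqrt(np.mean(error ** 2))),  # root mean squared error
            'r2': float(1.0 - np.sum(error ** 2)
                        / np.sum((y_true - y_true.mean()) ** 2)),
        }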
Example #2
def debug_updated_weights(opts, steps, weights, data):
    """ Various debug plots for updated weights of training points.

    """
    assert data.num_points == len(weights), 'Length mismatch'
    ws_and_ids = sorted(zip(weights, range(len(weights))))
    num_plot = 6 * 16
    if num_plot > len(weights):
        return
    ids = [_id for w, _id in ws_and_ids[:num_plot]]
    plot_points = data.data[ids]
    metrics = Metrics()
    metrics.make_plots(opts, steps, None, plot_points, prefix='d_least_')
    ids = [_id for w, _id in ws_and_ids[-num_plot:]]
    plot_points = data.data[ids]
    metrics = Metrics()
    metrics.make_plots(opts, steps, None, plot_points, prefix='d_most_')
    plt.clf()
    ax1 = plt.subplot(211)
    ax1.set_title('Weights over data points')
    plt.plot(range(len(weights)), sorted(weights))
    plt.axis([0, len(weights), 0., 2. * np.max(weights)])
    if data.labels is not None:
        all_labels = np.unique(data.labels)
        w_per_label = -1. * np.ones(len(all_labels))
        for _id, y in enumerate(all_labels):
            w_per_label[_id] = np.sum(weights[np.where(data.labels == y)[0]])
        ax2 = plt.subplot(212)
        ax2.set_title('Weights over labels')
        plt.scatter(range(len(all_labels)), w_per_label, s=30)
    filename = 'data_w{:02d}.png'.format(steps)
    create_dir(opts['work_dir'])
    plt.savefig(o_gfile((opts["work_dir"], filename), 'wb'))
Example #3
def compare():
    f1 = './unet11.json'
    f2 = './unet16n.json'

    with open(f1, 'r') as f:
        data1 = json.load(f)

    with open(f2, 'r') as f:
        data2 = json.load(f)

    train_metrics = Metrics()
    val_metrics = Metrics()

    print(f1, f2)
    for i, item1 in enumerate(data1['items']):
        item2 = data2['items'][i]
        coef1 = Coef(**item1['coef'])
        coef2 = Coef(**item2['coef'])
        print(item1['name'], coef1.pjac, coef2.pjac)
        print()

        if re.match(r'mean', item1['type']):
            continue
        if item1['type'] == 'train':
            m = train_metrics
        elif item1['type'] == 'val':
            m = val_metrics
        else:
            continue
        m.append_coef(coef1)

    print('train: ', train_metrics.avg_coef().to_str())
    print('val  : ', val_metrics.avg_coef().to_str())
    print()
Example #4
def main():
    print('Loading pretrained Word2Vec embeddings for wiki toxicity data...')
    embeddings = Word2Vec.load('../data/personal_attacks/output/word2vec_model.model')
    vocab = np.load('../data/personal_attacks/output/wikimedia-personal-attacks-embeddings.npy')
    print(f'Vocab size = {len(vocab)}')

    # Build word2id lookup for vocabulary
    word2id = {}
    for key, val in embeddings.wv.vocab.items():
        idx = val.index
        vocab[idx, :] = embeddings[key]
        word2id[key] = idx

    # Load data to train the model with
    tokens, labels = load_csv_data()
    data = create_data_dictionary(tokens, labels, word2id, unk=len(vocab) - 1)
    x, y = prepare_data_for_training_lstm(data)

    # Pad/trim the input sequences to all be the same length
    print('Padding sequences (samples x time)...')
    x = sequence.pad_sequences(x, maxlen=hps['pad_length'])
    print('x shape:', x.shape)

    # Split the data for cross validation
    strat_kfold = StratifiedKFold(n_splits=hps['cv_num_splits'], shuffle=True)

    # Afterwards, we will print the results to a CSV file to be analyzed later
    results_file_name = f'../results/lstm/results_{time()}.csv'
    results_file = open(results_file_name, mode='a')

    # Build, train, and evaluate the LSTM model
    results_file.write(Metrics.get_csv_header('lstm', 'macro') + '\n')
    for train, test in strat_kfold.split(x, y):
        lstm_metrics = Metrics()
        model = build_lstm_model(len(vocab), vocab)
        model.fit(x[train], y[train], batch_size=hps['batch_size'],
                  epochs=hps['epochs'], callbacks=[lstm_metrics],
                  validation_data=(x[test], y[test]))

        acc = lstm_metrics.val_accs[-1]
        prec = lstm_metrics.val_precisions[-1]
        recall = lstm_metrics.val_recalls[-1]
        f1 = lstm_metrics.val_f1s[-1]
        results_file.write(f'lstm, {acc}, {prec}, {recall}, {f1}' + '\n')

    # Build, train, and evaluate the GRU model
    results_file.write(Metrics.get_csv_header('gru', 'macro') + '\n')
    for train, test in strat_kfold.split(x, y):
        gru_metrics = Metrics()
        model = build_gru_model(len(vocab), vocab)
        model.fit(x[train], y[train], batch_size=hps['batch_size'],
                  epochs=hps['epochs'], callbacks=[gru_metrics],
                  validation_data=(x[test], y[test]))

        acc = gru_metrics.val_accs[-1]
        prec = gru_metrics.val_precisions[-1]
        recall = gru_metrics.val_recalls[-1]
        f1 = gru_metrics.val_f1s[-1]
        results_file.write(f'gru, {acc}, {prec}, {recall}, {f1}' + '\n')

    results_file.close()
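The Metrics callback used above exposes val_accs, val_precisions, val_recalls, and val_f1s after training. A plausible minimal implementation (an assumption, relying on the validation_data attribute that older Keras versions expose to callbacks; the project's real class may differ):

import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import (accuracy_score, f1_score,
                             precision_score, recall_score)

class Metrics(Callback):
    def on_train_begin(self, logs=None):
        self.val_accs, self.val_precisions = [], []
        self.val_recalls, self.val_f1s = [], []

    def on_epoch_end(self, epoch, logs=None):
        # validation_data is populated by older Keras when fit() gets it
        x_val, y_val = self.validation_data[0], self.validation_data[1]
        y_pred = (self.model.predict(x_val) > 0.5).astype(int).ravel()
        self.val_accs.append(accuracy_score(y_val, y_pred))
        self.val_precisions.append(precision_score(y_val, y_pred, average='macro'))
        self.val_recalls.append(recall_score(y_val, y_pred, average='macro'))
        self.val_f1s.append(f1_score(y_val, y_pred, average='macro'))

    @staticmethod
    def get_csv_header(model_name, avg):
        # Hypothetical header matching the rows written above
        return f'model, acc_{avg}, precision_{avg}, recall_{avg}, f1_{avg}'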
Example #5
def get_metrics(arrTrue, arrPred):
    metrics = Metrics(arrTrue, arrPred)
    accuracy = metrics.accuracy()
    f1Score = metrics.f1_score()
    precision = metrics.precision_score()

    print('accuracy: ', accuracy * 100)
    print('f1Score: ', f1Score * 100)
    print('precision: ', precision * 100)
Example #6
def get_mfs(arrTrue, mfs_sense):
    arrPred = [mfs_sense for _ in arrTrue]

    print('=====================MFS=======================================')
    print('arrTrue: ', arrTrue)
    print('arrPred: ', arrPred)
    metrics = Metrics(arrTrue, arrPred)
    accuracy = metrics.accuracy()
    f1Score = metrics.f1_score()
    precision = metrics.precision_score()

    print('accuracy: ', accuracy * 100)
    print('f1Score: ', f1Score * 100)
    print('precision: ', precision * 100)
Example #7
def get_avg_coef(fp):
    with open(fp, 'r') as f:
        data = json.load(f)
    tm, vm = Metrics(), Metrics()
    for i, item in enumerate(data['items']):
        coef = Coef(**item['coef'])
        if re.match(r'mean', item['type']):
            continue
        if item['type'] == 'train':
            tm.append_coef(coef)

        if item['type'] == 'val':
            vm.append_coef(coef)

    return tm.avg_coef(), vm.avg_coef()
Example #8
    def closest_document(self, documents, centroids):

        metric = Metrics()
        list_centroid = []

        documents_k = documents[:]

        # ready to cluster
        for centroid in range(len(centroids)):
            list_closest = []
            lista_best_document = []
            for k, document in documents_k:
                distance_cosine = metric.get_cosine_distance(
                    centroids[centroid][1], document)

                list_closest.append(DocumentKmean(k, distance_cosine,
                                                  document))

            list_centroid.append((centroids[centroid][0], list_closest))

        for index_lista in range(len(list_centroid)):
            if len(lista_best_document) == 0:
                lista_best_document = [
                    doc.distance_cosine
                    for doc in list_centroid[index_lista][1]
                ]
            else:
                # keep the element-wise maximum cosine distance seen so far
                lista_best_document = [
                    max(doc.distance_cosine, best)
                    for doc, best in zip(list_centroid[index_lista][1],
                                         lista_best_document)
                ]

        for k in range(len(list_centroid)):
            array_remove = []
            for index in range(len(lista_best_document)):
                if lista_best_document[index] > list_centroid[k][1][
                        index].distance_cosine:
                    array_remove.append(index)
            for index in sorted(array_remove, reverse=True):
                del list_centroid[k][1][index]

        # drop documents that produced no cosine value at all, so they
        # do not bias the average
        for k in range(len(list_centroid)):
            list_centroid[k] = (list_centroid[k][0],
                                [doc for doc in list_centroid[k][1]
                                 if doc.distance_cosine != 0])


        return list_centroid
Example #9
    def search(self, board):
        visited = set()
        queue = []
        node = Node(board.player, board.boxes, None, None, 0)
        metrics = Metrics('BFS', False, 0, 0, 0, 0, 0, [])
        queue.append(node)  #save initial node
        visited.add(node)  #save already visited nodes

        while queue:

            curr = queue.pop(0)
            if (board.is_completed(curr)):
                metrics.success = True
                metrics.frontier = len(queue)
                print('finished with: ' + str(metrics.nodes_expanded))

                return SearchResults(metrics, curr)
            visited.add(curr)
            moves = board.get_possible_moves(
                curr, self.checkDeadlocks)  #get a tree level
            if (moves):  #curr has children
                metrics.nodes_expanded += 1

            for move in moves:
                if move not in visited:
                    visited.add(move)  # mark on enqueue so each node is queued at most once
                    queue.append(move)

        # Queue is empty so there is no solution
        metrics.success = False
        return SearchResults(metrics, None)
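The positional arguments in Metrics('BFS', False, 0, 0, 0, 0, 0, []) are opaque on their own. Judging from the fields the loop touches (success, frontier, nodes_expanded), the record presumably looks something like this sketch (the remaining field names are guesses):

class Metrics:
    def __init__(self, algorithm, success, time, nodes_expanded,
                 frontier, depth, cost, solution):
        self.algorithm = algorithm            # e.g. 'BFS'
        self.success = success                # True once a goal state is found
        self.time = time                      # hypothetical: elapsed search time
        self.nodes_expanded = nodes_expanded  # incremented when a node has children
        self.frontier = frontier              # queue length at termination
        self.depth = depth                    # hypothetical: solution depth
        self.cost = cost                      # hypothetical: solution cost
        self.solution = solution              # hypothetical: list of moves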
Example #10
def upload_metrics(images, BUILDSPEC, is_any_build_failed,
                   is_any_build_failed_size_limit):
    """
    Uploads Metrics for a list of images.

    :param images: list[DockerImage]
    :param BUILDSPEC: Buildspec
    :param is_any_build_failed: bool
    :param is_any_build_failed_size_limit: bool
    """
    metrics = Metrics(context=constants.BUILD_CONTEXT,
                      region=BUILDSPEC["region"],
                      namespace=constants.METRICS_NAMESPACE)
    for image in images:
        try:
            metrics.push_image_metrics(image)
        except Exception as e:
            if is_any_build_failed or is_any_build_failed_size_limit:
                raise Exception(f"Build failed.{e}")
            else:
                raise Exception(f"Build passed. {e}")

    if is_any_build_failed_size_limit:
        raise Exception("Build failed because of file limit")

    FORMATTER.print("Metrics Uploaded")
Example #11
    def __init__(self, config, environment, sess):
        super().__init__(config)
        self.sess = sess
        self.weight_dir = 'weights'

        self.config = config
        #self.ag = self.config.ag
        self.c_ag = self.config.ag.c
        self.mc_ag = self.config.ag.mc
        self.gl = self.config.gl
        self.environment = environment
        self.goals = self.define_goals()
        
        self.mc_ag.update({"q_output_length": self.goal_size}, add=True)
        self.c_ag.update({"q_output_length": self.environment.action_size}, add=True)

        self.mc_memory = self.create_memory(config=self.mc_ag,
                                            size=self.environment.state_size)
        self.c_memory = self.create_memory(config=self.c_ag,
                                            size=self.environment.state_size +
                                                 self.goal_size)
       
        self.m = Metrics(self.config, self.logs_dir, self.goals)
    
        #self.config.print()
        self.build_hdqn()
        self.write_configuration()
Example #12
    def __init__(self,
                 vocab_size,
                 embed_size,
                 nhidden,
                 nlayers,
                 model='LSTM',
                 cuda=False,
                 metrics=None,
                 verbose=False):
        super(Model, self).__init__()
        if metrics is None:
            # A Metrics('metrics') default argument would be created once at
            # definition time and shared by every instance; build it per call
            metrics = Metrics('metrics')
        self.encoder = nn.Embedding(vocab_size, embed_size)
        if model == 'LSTM':
            self.rnn = nn.LSTM(embed_size, nhidden, nlayers)
        elif model == 'GRU':
            self.rnn = nn.GRU(embed_size, nhidden, nlayers)
        else:
            self.rnn = nn.RNN(embed_size, nhidden, nlayers)
        self.decoder = nn.Linear(nhidden, vocab_size)

        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.nhidden = nhidden
        self.nlayers = nlayers
        self.model = model
        self.device = torch.device("cpu" if not cuda else "cuda")
        self.metrics = metrics
Example #13
    def predict(self,
                db,
                fold='test',
                K=None,
                scmodule_path=None,
                lmbd_weights=None,
                get_predicates=False):
        print "predicting..."
        num = len(self.instances[fold])
        test_metrics = Metrics()

        all_predictions = set([])
        for i, inst in enumerate(self.instances[fold]):
            #print
            #print i, inst
            inference_weights = \
                self._instance_scores(fold, inst, K, scmodule_path, lmbd_weights)
            '''
            for gr in inference_weights:
                print gr, inference_weights[gr]
            '''
            metrics, active_heads = self._run_inference(
                fold, inst, inference_weights, get_active_heads=get_predicates)
            #print metrics
            if active_heads:
                all_predictions |= active_heads
            test_metrics.load_metrics(metrics)
            #exit()
        if get_predicates:
            return test_metrics, all_predictions
        else:
            return test_metrics
Example #14
    def test_model(self, test_inputs, test_labels, bs):
        """
        Evaluate the model and display results.
        :param test_inputs: Test data.
        :param test_labels: Test labels.
        :param bs: Batch size.
        :return: None
        """
        # Use callbacks
        metrics = Metrics((test_inputs, test_labels))

        # Evaluate inputs
        eval_res = self.model.evaluate(test_inputs, test_labels,
                                       batch_size=bs,
                                       callbacks=[metrics])
        print(eval_res)

        labels = []
        for labels_arr in test_labels:
            for i in range(len(labels_arr)):
                if labels_arr[i] == 1:
                    labels.append(i)

        # Predict
        predicts = self.model.predict(test_inputs, batch_size=bs, verbose=1)
        pred_arr = np.argmax(predicts, axis=1)

        # F1 score weighted
        compute_print_f1(pred_arr, np.asarray(labels), "weighted")

        # F1 score macro
        compute_print_f1(pred_arr, np.asarray(labels), "macro")
Example #15
    def __init__(self, args):
        self.num_epochs = args.epochs
        self.cuda = args.cuda
        self.verbose = args.verbose

        self.batch_size = args.batch_size
        self.batch_size_val = args.batch_size_val
        self.learning_rate = args.learning_rate
        self.decay_epoch = args.decay_epoch
        self.lr_decay = args.lr_decay
        self.w_cat = args.w_categ
        self.w_gauss = args.w_gauss
        self.w_rec = args.w_rec
        self.rec_type = args.rec_type

        self.num_classes = args.num_classes
        self.gaussian_size = args.gaussian_size
        self.input_size = args.input_size

        # gumbel
        self.init_temp = args.init_temp
        self.decay_temp = args.decay_temp
        self.hard_gumbel = args.hard_gumbel
        self.min_temp = args.min_temp
        self.decay_temp_rate = args.decay_temp_rate
        self.gumbel_temp = self.init_temp

        self.network = GMVAENet(self.input_size, self.gaussian_size,
                                self.num_classes)
        self.losses = LossFunctions()
        self.metrics = Metrics()

        if self.cuda:
            self.network = self.network.cuda()
Example #16
def main(instances_directory, opt_directory, sub_directory):
    files = os.listdir(instances_directory + sub_directory)
    NO_RUNS = 1
    NO_EVALS = 1000
    for f in files:
        if f[0] != '.':
            instance = MaxCut(f, instances_directory, opt_directory)

            hp = {  # DEFAULT VALUES
                'num_particles': 50,
                'c1': 1.0,
                'c2': 1.0,
                'v_bound': 6.0,
                'nodes': instance.length_genotypes,
                'maxweight': 100,
                'edgeprob': 0.4,
                'GBO': False,
                'local_search': False
            }

            metrics = Metrics('BPSO', NO_RUNS, NO_EVALS, hp)

            for run in range(NO_RUNS):
                metrics.run = run
                VectorizedBinaryParticleSwarmOptimization(
                    instance, metrics, hp['num_particles'], run, hp['c1'],
                    hp['c2'], hp['v_bound']).np_run(hp['GBO'],
                                                    hp['local_search'])

            metrics_file_prefix = metrics_filename_generator(hp)

            metrics.write_to_file('metrics-PSO', [metrics],
                                  metrics_file_prefix)
Example #17
    def _build_criteria_and_optim(self):
        # noinspection PyArgumentList
        self.loss = WeightedSum.from_config(
            self.get("losses", ensure_exists=True))
        self.optim = torch.optim.Adam(self.model.parameters(),
                                      **self.get("optim/kwargs"))
        self.metrics = Metrics()
Example #18
def bilstm_train_and_eval(train_data,
                          dev_data,
                          test_data,
                          word2id,
                          tag2id,
                          args,
                          remove_O=False):
    train_word_lists, train_tag_lists = train_data
    dev_word_lists, dev_tag_lists = dev_data
    test_word_lists, test_tag_lists = test_data
    with open(args.map_path, 'wb') as f:
        pickle.dump([word2id, tag2id], f)

    start = time.time()
    vocab_size = len(word2id)
    out_size = len(tag2id)

    data_utils.save_config(args.config_path, vocab_size, out_size)

    bilstm_model = BILSTM_Model(vocab_size, out_size, args)
    bilstm_model.train(train_word_lists, train_tag_lists, dev_word_lists,
                       dev_tag_lists, word2id, tag2id)

    model_name = "bilstm_crf"
    print("训练完毕,共用时{}秒.".format(int(time.time() - start)))
    print("评估{}模型中...".format(model_name))
    pred_tag_lists, test_tag_lists = bilstm_model.test(test_word_lists,
                                                       test_tag_lists, word2id,
                                                       tag2id)

    metrics = Metrics(test_tag_lists, pred_tag_lists, remove_O=remove_O)
    metrics.report_scores()
    metrics.report_confusion_matrix()

    return pred_tag_lists, test_tag_lists
Example #19
    def train(self, episodes_num):
        metrics = Metrics(self.save_path)
        metrics.save(['score', 'exec_time'])

        for episode in range(1, episodes_num + 1):
            score = 0
            begin_time = datetime.datetime.now()
            state = self.env.reset()

            for i in itertools.count(0, 1):
                action = self.get_action(state)
                next_state, reward, done, _ = self.env.step(action)
                self.update(state, action, next_state, reward, done)
                state = next_state

                if done:
                    self.done_update(episode, i)
                    score = i
                    break

            self.epsilon = max(self.epsilon_min,
                               self.epsilon * self.epsilon_decay)
            exec_time = datetime.datetime.now() - begin_time
            metrics.save([score, exec_time.seconds])

            if episode % 100 == 0:
                self.target_network.save(self.save_path + '/episode' +
                                         str(episode) + '.h5')

        self.env.close()
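Metrics(self.save_path) above behaves like a row-appending CSV logger: the first save call writes the header, each later call appends one row per episode. A minimal sketch consistent with that usage (the real class may buffer or name the file differently):

import csv
import os

class Metrics:
    def __init__(self, save_path, filename='metrics.csv'):
        os.makedirs(save_path, exist_ok=True)
        self.path = os.path.join(save_path, filename)

    def save(self, row):
        # Append one row per call; the caller is responsible for
        # writing the header row first
        with open(self.path, 'a', newline='') as f:
            csv.writer(f).writerow(row)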
Example #20
    def __init__(self, params):
        self.batch_size = params.batch_size
        self.batch_size_val = params.batch_size_val
        self.initial_temperature = params.temperature
        self.decay_temperature = params.decay_temperature
        self.num_epochs = params.num_epochs
        self.loss_type = params.loss_type
        self.num_classes = params.num_classes
        self.w_gauss = params.w_gaussian
        self.w_categ = params.w_categorical
        self.w_recon = params.w_reconstruction
        self.decay_temp_rate = params.decay_temp_rate
        self.gaussian_size = params.gaussian_size
        self.min_temperature = params.min_temperature
        self.temperature = params.temperature  # current temperature
        self.verbose = params.verbose

        self.sess = tf.Session()
        self.network = Networks(params)
        self.losses = LossFunctions()

        self.learning_rate = tf.placeholder(tf.float32, [])
        self.lr = params.learning_rate
        self.decay_epoch = params.decay_epoch
        self.lr_decay = params.lr_decay

        self.dataset = params.dataset
        self.metrics = Metrics()
Example #21
    def train(self, train_inputs, train_labels, val_inputs, val_labels, epochs, bs):
        """
        Train the model.
        :param train_inputs: Training data.
        :param train_labels: Training labels.
        :param val_inputs: Validation data.
        :param val_labels: Validation labels.
        :param epochs: Epochs.
        :param bs: Batch size.
        :return: None
        """
        # Create callbacks
        filepath= "checkpoints/weights-improvement-{epoch:02d}-acc-{val_accuracy:.2f}-loss-{val_loss:.2f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')

        metrics = Metrics((val_inputs, val_labels))
        es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=1, patience=5)

        callbacks_list = [checkpoint, metrics, es]

        self.model.fit(
            train_inputs, train_labels,
            validation_data=(val_inputs, val_labels),
            epochs=epochs,
            batch_size=bs,
            verbose=1,
            callbacks=callbacks_list
        )
Example #22
 def __init__(self, ip, udp_port, mode='FIFO'):
     self.member = Member(None, None, ip, udp_port,
                          self.generate_random_port())
     self.udp_socket = None
     self.tcp_socket = None
     self.tracker_ip = None
     self.tracker_port = None
     self.input_fd = None
     if mode not in ('FIFO', 'TOTAL_ORDER'):
         print('Unsupported message ordering mode')
         sys.exit(3)
     else:
         self.mode = mode
     # dictionary of all groups this client belongs to
     self.group_list = {}
     # the group client has selected to send messages
     self.current_group = None
     # serial number of message sent by client
     self.message_num = 0
     # lamport vector implementation for FIFO ordering implemented as a dictionary
     # with key = <(username, group_name)> and value = <message-serial-no>
     self.lamport_dict = {}
     # buffer storing messages waiting to be delivered
     self.messages_buffer = []
     # lamport timestamp to support total ordering operation mode
     self.lamport_timestamp = 0
     # performance metrics
     self.metrics = Metrics()
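The comments above describe the FIFO machinery: lamport_dict tracks, for each (username, group_name) pair, the serial number of the last message delivered. A minimal sketch of the delivery rule this implies (a hypothetical helper, not the client's actual method):

 def _fifo_deliverable(self, username, group_name, serial_no):
     # FIFO rule: deliver only the next serial number from this sender/group;
     # anything later waits in messages_buffer
     last_delivered = self.lamport_dict.get((username, group_name), 0)
     return serial_no == last_delivered + 1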
Example #23
 def get_task_metrics(self, uid):
     tasks = self.goals[uid]
     m = []
     for task in tasks:
         m.append((task['name'], task['start']))
         m.append(('none', task['end']))
     metr = Metrics(m)
     return metr
Example #24
def load_metrics(window_ms, stride_ms, imp_split, dos_type, model, baseline, subset, is_test=False):
    """Loads metrics from the file associated with the specified parameters.

    :param window_ms: the used window size (int ms).
    :param stride_ms: the used step-size (int ms).
    :param imp_split: a flag indicating whether the impersonation labels were split.
    :param dos_type: a string indicating the type of DoS dataset used ('modified', 'original').
    :param model: a string indicating the model used ('mlp', 'knn', 'svm', 'rf', 'nbc', 'lr', 'dt', 'bn').
    :param baseline: a flag indicating whether baseline parameters were used.
    :param subset: a list of feature labels, corresponding to the features used.
    :param is_test: a flag indicating whether the test set was used.
    :return: a dictionary of Metrics objects, with a key for each class as well as 'total'.
    """

    path, _ = get_metrics_path(window_ms, stride_ms, imp_split, dos_type, model, baseline, subset, is_test=is_test)
    metrics = {}

    with open(path, newline="") as file:
        reader = csv.reader(file, delimiter=",")
        # Skip header
        next(reader, None)

        # For each row in the file, construct a Metrics object
        for row in reader:
            metrics[row[0]] = Metrics(*[float(string) for string in row[1:]])

    return metrics
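A minimal usage sketch (the parameter values and feature labels below are illustrative, not taken from the project):

metrics = load_metrics(
    window_ms=100, stride_ms=100,
    imp_split=False, dos_type='original',
    model='mlp', baseline=True,
    subset=['mean_id_interval', 'req_to_res_time_variance'],  # hypothetical feature labels
    is_test=False)
print(metrics['total'])  # aggregate scores across classes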
Example #25
    def __init__(self, args):
        self.num_epochs = args.epochs
        self.cuda = args.cuda
        self.verbose = args.verbose

        self.batch_size = args.batch_size
        self.batch_size_val = args.batch_size_val
        self.learning_rate = args.learning_rate
        self.decay_epoch = args.decay_epoch
        self.lr_decay = args.lr_decay
        self.w_cat = args.w_categ
        self.w_gauss = args.w_gauss
        self.w_rec = args.w_rec
        self.rec_type = args.rec_type

        self.gaussian_size = args.gaussian_size
        self.input_size = args.input_size
        self.output_size = args.output_size

        self.network = VAENet(self.input_size, self.gaussian_size,
                              self.output_size)
        self.losses = LossFunctions()
        self.metrics = Metrics()

        if self.cuda:
            self.network = self.network.cuda()
Example #26
    def gather(self):
        for i in range(self.levelCount):
            level_name = self.levels[i]

            print('gathering from ' + level_name)

            delay = self.gather_delay[i]
            args = copy.deepcopy(self.args)
            args.memory_delay = delay
            args.directory = 'gather_' + level_name
            
            agent, hasSavedMemory, max_frame_saved = playGameReal_a3c_incremental_init(
                args, agent_a3c.Agent, self.state_dim, self.action_dim)
            
            while True:
                hasSavedMemory, max_frame_saved = playGameReal_a3c_incremental(
                    agent, self.env, self.state_dim, self.action_dim,
                    hasSavedMemory, max_frame_saved)
                if hasSavedMemory:
                    break
                
            
            agent.metrics.save(agent.results_location, 'metrics') # Save metrics
            agent.metrics.runs.graph(agent.results_location, 'runs')
            agent.metrics = Metrics(agent.metrics.type) # Reset metrics
            agent.brain.metrics = agent.metrics
            
            print('switching levels')
            # Switch to next level
            self.env.env.env.press('right_arrow')
            time.sleep(0.1)
            self.env.env.env.release('right_arrow')
            time.sleep(0.1)
            
        print('all done')
Example #27
def queries(training_set, test_set, train_vectors, test_vectors, path):
    threshold_start = 1
    threshold_end = 10
    thresholds = []
    metrics_obj_list = []

    for i in range(threshold_start, threshold_end + 1):
        thresholds.append(i)
        metrics_obj_list.append(Metrics())

    fw = FileWriter(path)
    evaluator = Evaluator(training_set)

    for i in range(len(test_vectors)):
        scores = query(train_vectors, test_vectors[i], threshold_end)
        query_doc = test_set[i]

        for j in range(len(thresholds)):
            threshold = thresholds[j]

            evaluator.query([training_set[x] for (x, y) in scores[0:threshold]],
                            query_doc)
            evaluator.calculate()

            metrics_obj_list[j].updateConfusionMatrix(evaluator)
            metrics_obj_list[j].updateMacroAverages(evaluator)

    for obj in metrics_obj_list:
        obj.calculate(len(test_set))

    fw.writeToFiles(metrics_obj_list, thresholds)
Example #28
def empirical_estimate(X, n_samples, plot=True):
    ''' Empirical estimation '''
    print('##########  Empirical Estimation  ##########')
    # Sample Covariance matrix
    cov_emp = np.dot(X.T, X) / n_samples
    prec_emp = np.linalg.pinv(cov_emp)
    A = np.diag(np.diag(prec_emp)) - prec_emp
    # uncomment for thresholding in unweighted graph
    # A[A>eps] = 1
    # A[A<eps] = 0
    # prec_emp = np.diag(np.sum(A, axis=1)) - A
    metric = Metrics(L_true, prec_emp)  # L_true: ground-truth Laplacian (module-level global)
    print('Rel error:', metric.relative_error())
    print('F1 score:', metric.f1_score())

    if plot:
        fig = plt.figure(figsize=(15, 15))
        plt.title('Estimated Laplacian empirical')
        plt.imshow(prec_emp)
        plt.colorbar()
        filename = 'plots/bipartite_estimated_Laplacian_empirical.png'
        fig.savefig(filename, format='png')
        plt.close()

        fig = plt.figure(figsize=(15, 15))
        A = np.diag(np.diag(prec_emp)) - prec_emp
        plt.title('Estimated Adjacency empirical')
        plt.imshow(A)
        plt.colorbar()
        filename = 'plots/bipartite_estimated_adj_empirical.png'
        fig.savefig(filename, format='png')
        plt.close()
    return prec_emp, cov_emp
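Metrics(L_true, prec_emp) compares the estimated precision/Laplacian matrix against the ground-truth Laplacian. The two scores printed are standard for graph learning; a sketch of plausible definitions (an assumption about this project's class):

import numpy as np

class Metrics:
    def __init__(self, L_true, L_est, eps=1e-4):
        self.L_true, self.L_est, self.eps = L_true, L_est, eps

    def relative_error(self):
        # Frobenius-norm error relative to the ground truth
        return (np.linalg.norm(self.L_true - self.L_est, 'fro')
                / np.linalg.norm(self.L_true, 'fro'))

    def f1_score(self):
        # F1 on the recovered edge support (off-diagonal entries above eps)
        true_edges = np.abs(self.L_true - np.diag(np.diag(self.L_true))) > self.eps
        est_edges = np.abs(self.L_est - np.diag(np.diag(self.L_est))) > self.eps
        tp = np.sum(true_edges & est_edges)
        fp = np.sum(~true_edges & est_edges)
        fn = np.sum(true_edges & ~est_edges)
        return 2 * tp / (2 * tp + fp + fn) if tp else 0.0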
Example #29
def make_model():
    num_leads_signal = 12
    model = Sequential()

    model.add(
        Conv1D(32,
               kernel_size=8,
               activation=K.elu,
               input_shape=(None, num_leads_signal),
               padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(MaxPooling1D(pool_size=2))

    model.add(Bidirectional(LSTM(50, return_sequences=True)))

    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(UpSampling1D(2))
    model.add(Conv1D(32, kernel_size=8, activation=K.elu, padding='same'))
    model.add(Dense(4, activation='softmax'))

    metric = Metrics()
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', metric.Se, metric.PPV])
    return model
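metric.Se and metric.PPV above are custom Keras metric functions: sensitivity (recall) and positive predictive value (precision), evaluated per batch. A sketch of how such metrics are commonly written with the Keras backend (the project's exact implementation is not shown in the snippet):

from keras import backend as K

class Metrics:
    def Se(self, y_true, y_pred):
        # Sensitivity: TP / (TP + FN)
        tp = K.sum(K.round(y_true * y_pred))
        fn = K.sum(K.round(y_true * (1 - y_pred)))
        return tp / (tp + fn + K.epsilon())

    def PPV(self, y_true, y_pred):
        # Positive predictive value: TP / (TP + FP)
        tp = K.sum(K.round(y_true * y_pred))
        fp = K.sum(K.round((1 - y_true) * y_pred))
        return tp / (tp + fp + K.epsilon())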
Example #30
    def __init__(self, config):
        self.config = config
        self.fort_timeouts = dict()
        self.pokemon_list = json.load(
            open(os.path.join('data', 'pokemon.json')))
        self.item_list = json.load(open(os.path.join('data', 'items.json')))
        self.metrics = Metrics(self)
        self.latest_inventory = None
        self.cell = None
        self.recent_forts = [None] * config.forts_max_circle_size
        self.tick_count = 0
        self.softban = False
        self.start_position = None
        self.last_map_object = None
        self.last_time_map_object = 0
        self.logger = logging.getLogger(type(self).__name__)

        # Make our own copy of the workers for this instance
        self.workers = []

        # Threading setup for file writing
        self.web_update_queue = Queue.Queue(maxsize=1)
        self.web_update_thread = threading.Thread(
            target=self.update_web_location_worker)
        self.web_update_thread.start()