Code Example #1
def main(args):
    # set up the model. different environments need different params
    tt_params = {
        "BipedalWalker-v2": (24, 4, 2, 10),
        "CarRacing-v0": (16**2, 3, 4, 3)
    }
    tt = AlphaSegmenter(*tt_params[args.env])

    # reference trajectory used to plot latents over time
    ref_traj = next(get_datasets(folder=f"data/{args.env}")).tensors

    optimizer = optim.Adam(tt.parameters(), lr=1e-3)
    losses = []
    alphas = []
    try:
        for epoch_idx in range(3):
            for i, d in enumerate(get_datasets(folder=f"data/{args.env}")):
                new_losses, new_alphas = train(tt,
                                               d,
                                               optimizer,
                                               ref_traj=ref_traj)
                losses += new_losses
                alphas += new_alphas
                print(f"Epoch {epoch_idx}:\t{losses[-1]}")
    except KeyboardInterrupt:
        print("Stopping Early!")

    fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
    ax1.plot(np.array(losses),
             label=["loss", "prior", "recon", "dynamics", "reinforce"])
    ax1.legend()
    ax2.imshow(np.array(alphas).T)
    plt.show()
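A usage note: main() only needs an object exposing an env attribute, so a minimal harness could look like the sketch below (the --env flag is an illustrative assumption; the original file's CLI is not shown).

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # hypothetical flag; pick one of the environments configured above
    parser.add_argument("--env", default="BipedalWalker-v2",
                        choices=["BipedalWalker-v2", "CarRacing-v0"])
    main(parser.parse_args())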
Code Example #2
def run(run_name, config_update):
    print(f'\nstart: {run_name}')

    pwd = os.path.dirname(os.path.abspath(__file__))
    with open(f'{pwd}/config.yml', 'r') as yml:
        config = yaml.safe_load(yml)

    # init_exp
    dir_save, config = util.init_exp(config, config_update, run_name)
    model_params = config['model']['params']
    split_params = config['split']

    # dataset
    df, target = util.get_datasets()
    X_train, X_valid, y_train, y_valid = train_test_split(df,
                                                          target,
                                                          test_size=0.3,
                                                          **split_params)

    # fit
    print(model_params)
    model = DecisionTreeClassifier(**model_params)
    model.fit(X_train, y_train)

    # predict
    y_valid_pred = model.predict(X_valid)

    # evaluate
    acc_valid = accuracy_score(y_valid, y_valid_pred)
    print(f'acc_valid: {acc_valid:.5f}')
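The keys read above imply the shape of config.yml. A plausible minimal file and call, with all values chosen purely for illustration:

# config.yml (illustrative)
# model:
#   params:
#     max_depth: 5
# split:
#   random_state: 42
#   shuffle: true

run('baseline', {'model': {'params': {'max_depth': 3}}})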
Code Example #3
def map_eval(eval_file, token_length, tokenizer, device, model, label_list):
    model.eval()
    datasets, labels = get_datasets(eval_file)
    total_batches = 0
    total_avp = 0.0
    total_mrr = 0.0
    for k, dataset in tqdm(datasets.items(), desc="Eval datasets"):
        examples = []
        for i, data in enumerate(dataset):
            examples.append(InputExample(i, data[0], data[1], '0'))
        eval_features = convert_examples_to_features(examples, label_list,
                                                     token_length, tokenizer)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                     dtype=torch.long).to(device)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features],
                                      dtype=torch.long).to(device)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                       dtype=torch.long).to(device)
        # all_label_ids = torch.tensor(
        #   [f.label_id for f in eval_features], dtype=torch.long).to(device)
        x_input_ids = torch.tensor([f.input_ids_x for f in eval_features],
                                   dtype=torch.long).to(device)
        x_input_mask = torch.tensor([f.input_mask_x for f in eval_features],
                                    dtype=torch.long).to(device)
        x_segment_ids = torch.tensor([f.segment_ids_x for f in eval_features],
                                     dtype=torch.long).to(device)
        y_input_ids = torch.tensor([f.input_ids_y for f in eval_features],
                                   dtype=torch.long).to(device)
        y_input_mask = torch.tensor([f.input_mask_y for f in eval_features],
                                    dtype=torch.long).to(device)
        y_segment_ids = torch.tensor([f.segment_ids_y for f in eval_features],
                                     dtype=torch.long).to(device)
        with torch.no_grad():
            logits = model(x_input_ids, x_input_mask, x_segment_ids,
                           y_input_ids, y_input_mask, y_segment_ids,
                           all_input_ids, all_segment_ids, all_input_mask)
        score = F.softmax(logits, dim=1)[:, 1].cpu().numpy()
        label = np.array(list(map(int, labels[k])))
        total_avp += mean_average_precision(label, score)
        total_mrr += mean_reciprocal_rank(label, score)
        total_batches += 1
    mAP = total_avp / total_batches
    mRR = total_mrr / total_batches
    logger.info("mAP: {}, MRR: {}".format(mAP, mRR))
    data = {'map': mAP, 'mrr': mRR}
    with open('./result.json', 'w', encoding='utf-8') as f:
        json.dump(data, f)
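mean_average_precision and mean_reciprocal_rank are imported from elsewhere in the project; below is a self-contained sketch of the standard implementations they are presumably compatible with (assumed equivalents, not the project's code):

import numpy as np

def mean_average_precision(labels, scores):
    # rank candidates by descending score, then average precision@k
    # over the ranks of the relevant (label == 1) items
    ranked = labels[np.argsort(-scores)]
    hits = np.flatnonzero(ranked == 1)
    if hits.size == 0:
        return 0.0
    return float(np.mean([(i + 1) / (rank + 1) for i, rank in enumerate(hits)]))

def mean_reciprocal_rank(labels, scores):
    # reciprocal rank of the first relevant item, 0 if there is none
    ranked = labels[np.argsort(-scores)]
    hits = np.flatnonzero(ranked == 1)
    return float(1.0 / (hits[0] + 1)) if hits.size else 0.0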
Code Example #4
File: run.py Project: foben/histalyzer
def main():
    #ARGUMENT PARSING:
    known_metrics = ['cr64', 'cg64', 'cb64', 'ccol',
                     'dd2', 'da3', 'dd3', 'dd2_64', 'da3_64', 'dd3_64',
                     'dd2_128', 'dd3_128', 'da3_128']
    used_metrics = []
    parser = argparse.ArgumentParser(prog='Histalyzer')
    for m in known_metrics:
        parser.add_argument('--%s' % m, type=int)
    parser.add_argument('--nn', type=int, default=5)
    parser.add_argument('--wcolor', type=int, default=1)
    parser.add_argument('--wdepth', type=int, default=1)
    parser.add_argument('--quiet', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--no_files', action='store_true')
    parser.add_argument('--scores', action='store_true')
    parser.add_argument('categories', nargs='+')
    parser.add_argument('--every', type=int, default=25)
    parsed = parser.parse_args()
    SET_NOFILES = parsed.no_files
    SET_QUIET = parsed.quiet
    SET_DEBUG = parsed.debug
    SET_EVERY = parsed.every
    SET_SCORES = parsed.scores
    if SET_QUIET:
        logging.basicConfig(level=logging.WARN)
    elif SET_DEBUG:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    categories, SET_PARTIAL = parse_categories(parsed.categories)
    weight_color = parsed.wcolor
    weights_color = []
    weight_depth = parsed.wdepth
    weights_depth = []
    neighbors = parsed.nn

    for k, v in vars(parsed).items():
        if not v or k not in known_metrics:
            continue
        if k[0] == 'c':
            weights_color.append((k, v))
            used_metrics.append(k)
        elif k[0] == 'd':
            weights_depth.append((k, v))
            used_metrics.append(k)
        else:
            raise ValueError('unknown metric type: "%s"' % k[0])

    if not weights_color:
        weight_color = 0
    if not weights_depth:
        weight_depth = 0
    weight_dict = {
        'color': (weight_color, weights_color),
        'depth': (weight_depth, weights_depth),
    }

    #PARAMETER SETUP:
    frameset = defs.get_frameset(SET_EVERY)
    all_individuals = parse_data(used_metrics)

    if not SET_NOFILES:
        dir_top, dir_raw = create_directory_structure(frameset, used_metrics, neighbors,
                partial=SET_PARTIAL, weights_color=weights_color, weights_depth=weights_depth)

    #OVERALL VARIABLES:
    overall_tested = 0
    overall_correct = 0
    
    classifier = KNNClassifier(weight_dict, neighbors, defs.ALL_CATEGORIES)

    for category in categories:
        #CATEGORY VARIABLES:
        category_tested = 0
        category_correct = 0

        for instance in all_individuals[category]:
            traindata, testdata = util.get_datasets(category, instance, all_individuals, frameset[0])
            result, instance_tested, instance_correct = classifier.perform_classification(traindata, testdata)
            category_tested += instance_tested
            overall_tested += instance_tested
            category_correct += instance_correct
            overall_correct += instance_correct
            if not SET_NOFILES:
                with open("%s/category_%s.csv" % (dir_raw, category), "a") as f:
                    f.write('%s %s,%s\n' % (category, instance, result))

        average_aggregated = float(category_correct) / category_tested * 100
        if not SET_NOFILES:
            with open("%s/category_%s.csv" % (dir_raw, category), "a") as f:
                f.write('%s average,%s\n' % (category, average_aggregated))
    
    if not SET_NOFILES:
        classifier.print_confusion_matrix(dir_top + '/confusion.csv')

    if SET_SCORES:
        scrs = classifier.get_overall_scores()
        print("{},{},{}".format(scrs[0], scrs[1], scrs[2]))

    overall_percentage = float(overall_correct)/overall_tested * 100
    logging.info("Overall %% %f", overall_percentage)
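The parsing loop above groups each --<metric> weight under its modality by first letter ('c' for color, 'd' for depth). For example, invoking the script as

    python run.py --cr64 2 --dd2 1 chair

(arguments chosen purely for illustration) yields:

    weight_dict == {'color': (1, [('cr64', 2)]),
                    'depth': (1, [('dd2', 1)])}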
Code Example #5
import tensorflow as tf  # assumed import; the published snippet is truncated above
tf.enable_v2_behavior()
import tensorflow_hub as hub

# from tensorflow import keras
import numpy as np
import pathlib
from util import get_datasets, calculate_accuracy
from processing import preprocess, postprocess
from tflite_helper import TFLiteConvertor

##################
# Prepare datasets
##################

cv_dataset, test_dataset = get_datasets()
num_images_in_cv_dataset = 100

# For 224 x 224
preprocess_cv_dataset_224 = []
for i in range(num_images_in_cv_dataset):
    preprocess_cv_dataset_224.append(preprocess(cv_dataset[i], 224, 224))

# For 299 x 299
preprocess_cv_dataset_299 = []
for i in range(num_images_in_cv_dataset):
    preprocess_cv_dataset_299.append(preprocess(cv_dataset[i], 299, 299))


def representative_data_gen_224():
    for input_image in preprocess_cv_dataset_224:
        # assumed completion of the truncated original: TFLite's
        # representative_dataset expects an iterable of input lists
        yield [input_image]
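A typical way to consume such a generator with the stock tf.lite API under TF 2.x (shown here instead of the project's TFLiteConvertor wrapper, whose interface is not visible in this snippet; the model variable is assumed):

converter = tf.lite.TFLiteConverter.from_keras_model(model)  # assumed Keras model
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen_224
tflite_model = converter.convert()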
Code Example #6
                # then z_t should be equal to c(s_t)
                s_target = s[l+1]
                s_pred = self.c[l](s[l])
                loss += (torch.pow(s_target - s_pred, 2) * b[l]).sum()

                # enforce hierarchy. this says that if a transition occurs at
                # the level above, then one should occur at this level too
                loss += (b[l+1] * (1. - b[l])).sum()

        return loss


    def decode(self, z):
        T = z.shape[1]
        out = self.out_net(z.view(-1, self.h_dim)).view(self.n_levels, T, -1)
        return torch.split(out, [self.z_out, self.g_out, 1], dim=2)


if __name__ == '__main__':
    fc = FCSegmenter(24, 4, 10, 6)

    for i, dataset in enumerate(get_datasets()):
        for j, (s, a) in enumerate(DataLoader(dataset, batch_size=50, shuffle=False)):
            with torch.no_grad():
                z = fc.encode(s, a)
                s, a, b = fc.decode(z)
                fc.likelihood(s, a, b)

            if j > 2:
                sys.exit(0)
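The hierarchy term b[l+1] * (1. - b[l]) penalizes a boundary at level l+1 that is not matched by one at level l. A toy check (tensor values invented for illustration):

import torch

b_upper = torch.tensor([0., 1., 0., 1.])  # boundaries at level l+1
b_lower = torch.tensor([0., 1., 0., 0.])  # boundaries at level l
# nonzero only where the upper level transitions but the lower level does not
penalty = (b_upper * (1. - b_lower)).sum()
print(penalty)  # tensor(1.)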
Code Example #7
def go(model_index):
    global send_whatsapp
    global split_num
    global k_fold_number
    global include_dependency_tree_feature
    global include_pos_feature
    global spam_max_length
    global non_spam_max_length
    global block_all_models_if_k_big
    global use_morphological
    global use_both

    if not use_morphological:
        include_dependency_tree_feature = False
        include_pos_feature = False

    if k_fold_number < 1:
        print("K is invalid, must be 1 or bigger.")
        return

    if block_all_models_if_k_big and model_index == 0 and k_fold_number > 1:
        print(
            "Can't train all models at once with k_fold_num > 1, because of memory problems."
        )
        print(
            "Please change k_fold_number to be 1, or choose a single model (1-5)"
        )
        return

    start_time = time.time()

    print()
    print("==========")
    print()

    datasets = get_datasets(spam_max_length, non_spam_max_length, use_both,
                            use_morphological)
    print("Starting cross-validation, with all the datasets:")
    """
    for x1 in datasets:
        print (x1)
        for x2 in datasets[x1]:
            print(x2)
            for x3 in datasets[x1][x2]:
                print(x3, datasets[x1][x2][x3])
    """

    for dataset in datasets.keys():
        print("Testing model %d (%s) for dataset '%s':" %
              (model_index, model.model_desc(model_index), dataset))
        test_driver(
            model.model_desc(model_index),
            model.Model,
            datasets[dataset],
            model_index,
            split=split_num,
            k_fold_num=k_fold_number,
            include_dependency_tree_feature=include_dependency_tree_feature,
            include_pos_feature=include_pos_feature,
            dataset_name=dataset)
        print()
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        print("@@@@@@           Finished dataset %s           @@@@@" % dataset)
        print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
        print()

    end_time = time.time()
    elapsed = end_time - start_time
    print()
    print("All time: %d seconds." % elapsed)
    print()
    print("FINISHED")

    if send_whatsapp:
        # replace None with a printable string for the WhatsApp message
        if spam_max_length is None:
            spam_max_length = "None"
        if non_spam_max_length is None:
            non_spam_max_length = "None"

        # send message to whatsapp
        msg = """הסתיימה הרצה בשרת.
        k = %d
        datasets = %s
        model index = %d
        model name = %s
        
        dependency_trees_feature = %r
        pos_feature = %r
        spam_max_length = %s
        non_spam_max_length = %s
        
        Total time: %d seconds""" % (k_fold_number, str(list(
            datasets.keys())), model_index, model.model_desc(model_index),
                                     include_dependency_tree_feature,
                                     include_pos_feature, str(spam_max_length),
                                     str(non_spam_max_length), elapsed)

        whatsapp.send_msg(msg)
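test_driver is project code and is not shown here; for orientation, the k-fold protocol it presumably implements looks like this generic sketch (every name below is a placeholder, not the project's API; X and y are assumed to be numpy arrays):

from sklearn.model_selection import KFold
import numpy as np

def k_fold_eval(build_model, X, y, k):
    # train a fresh model per fold and average the validation accuracy
    scores = []
    for train_idx, valid_idx in KFold(n_splits=k, shuffle=True).split(X):
        model = build_model()
        model.fit(X[train_idx], y[train_idx])
        scores.append(model.score(X[valid_idx], y[valid_idx]))
    return float(np.mean(scores))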