Example #1
def load_data(train_idx: NpArray, val_idx: NpArray) -> \
              Tuple[NpArray, NpArray, NpArray, NpArray, NpArray, Any, List[int]]:
    """ Loads all data. Returns: x_train, y_train, x_val, y_val, x_test,
    label_binarizer, clips_per_sample """
    train_df = pd.read_csv("../data/train.csv", index_col="fname")

    train_cache = "../output/train_cache_v%02x.pkl" % DATA_VERSION
    test_cache = "../output/test_cache_v%02x.pkl" % DATA_VERSION

    print("reading train dataset")
    if os.path.exists(train_cache):
        x = pickle.load(open(train_cache, "rb"))
    else:
        train_files = find_files("../data/audio_train/")
        print("len(train_files)", len(train_files))

        x = load_dataset(train_files)
        pickle.dump(x, open(train_cache, "wb"))

    # get whatever data metrics we need
    label_binarizer = LabelBinarizer()
    label_binarizer.fit(train_df["label"])

    x_train, y_train = build_train_dataset(x, train_df["label"], train_idx)
    x_val, y_val = build_train_dataset(x, train_df["label"], val_idx)

    print("reading test dataset")
    if os.path.exists(test_cache):
        x_test = pickle.load(open(test_cache, "rb"))
    else:
        test_files = find_files("../data/audio_test/")
        print("len(test_files)", len(test_files))

        x_test = load_dataset(test_files)
        pickle.dump(x_test, open(test_cache, "wb"))

    x_test, clips_per_sample = build_test_dataset(x_test)

    x_joined = np.concatenate([x_train, x_val])
    mean, std = np.mean(x_joined), np.std(x_joined)
    x_train = (x_train - mean) / std
    x_val = (x_val - mean) / std
    x_test = (x_test - mean) / std

    y_train = label_binarizer.transform(y_train)
    y_val = label_binarizer.transform(y_val)

    x_train = np.expand_dims(x_train, -1)
    x_val = np.expand_dims(x_val, -1)
    x_test = np.expand_dims(x_test, -1)

    x_train = x_train[:, :MAX_MFCC, ...]
    x_val = x_val[:, :MAX_MFCC, ...]
    x_test = x_test[:, :MAX_MFCC, ...]

    print("x_train.shape", x_train.shape, "y_train.shape", y_train.shape)
    print("x_val.shape", x_val.shape, "y_val.shape", y_val.shape)

    return x_train, y_train, x_val, y_val, x_test, label_binarizer, clips_per_sample
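As a usage note (not part of the original example), the train/val indices passed to load_data would typically come from a cross-validation splitter. A minimal sketch, assuming scikit-learn's KFold and a hypothetical clip count:

import numpy as np
from sklearn.model_selection import KFold

n_clips = 9473  # hypothetical number of training clips
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
train_idx, val_idx = next(iter(kfold.split(np.arange(n_clips))))
x_train, y_train, x_val, y_val, x_test, label_binarizer, clips_per_sample = \
    load_data(train_idx, val_idx)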
Example #2
def q15(never_used_param):
	n_params = 2
	N = 1000
	X_train, y_train = load_dataset(N, f, n_params, noise_level=0.1)
	lr = linreg.LinearRegression()
	lr.fit(transform(X_train), y_train)
	X_test, y_test = load_dataset(N, f, n_params, noise_level=0.1)
	err = np.sum(np.vectorize(sign)(lr.predict(transform(X_test))) != y_test)
	err_rate = err * 1.0 / N
	return err_rate
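The sign and transform helpers referenced by q13/q14/q15 are not shown in these snippets. A minimal sketch of what sign presumably does (an assumption, not the original implementation):

def sign(x):
    # map a real-valued prediction to a +/-1 label
    return 1 if x >= 0 else -1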
Example #3
def main():
    args = loadArgu()
    data.load_dataset(args.dataset)  # get the two lists of train and test images
    down_size = args.imgsize // args.scale
    network = EDSR(down_size, args.layers, args.featuresize, args.scale)
    network.set_data_fn(data.get_batch, (args.batchsize, args.imgsize, down_size), data.get_test_set,
                        (args.imgsize, down_size))
    network.train(args.iterations, args.savedir)

    return 1
Example #4
def load_data() -> Tuple[NpArray, NpArray, NpArray, NpArray, NpArray, List[str], Any]:
    """ Loads all data. """
    train_df = pd.read_csv("../data/train.csv", index_col="fname")

    train_cache = "../output/train_cache_v%02d.pkl" % DATA_VERSION
    test_cache = "../output/test_cache_v%02d.pkl" % DATA_VERSION

    print("reading train dataset")
    train_files = find_files("../data/audio_train/")
    print("len(train_files)", len(train_files))

    if os.path.exists(train_cache):
        x = pickle.load(open(train_cache, "rb"))
    else:
        x = load_dataset(train_files)
        pickle.dump(x, open(train_cache, "wb"))

    # get whatever data metrics we need
    label_binarizer = LabelBinarizer()
    label_binarizer.fit(train_df["label"])

    train_idx, val_idx = train_test_split(range(len(x)), test_size=TEST_SIZE)
    x_train, y_train = build_train_dataset(x, train_df["label"], train_idx)
    x_val, y_val = build_train_dataset(x, train_df["label"], val_idx)

    print("reading test dataset")
    test_files = find_files("../data/audio_test/")
    test_index = [os.path.basename(f) for f in test_files]

    if os.path.exists(test_cache):
        x_test = pickle.load(open(test_cache, "rb"))
    else:
        x_test = load_dataset(test_files)
        pickle.dump(x_test, open(test_cache, "wb"))

    x_test = take_first(x_test)

    x_joined = np.concatenate([x_train, x_val])
    mean, std = np.mean(x_joined), np.std(x_joined)
    x_train = (x_train - mean) / std
    x_val = (x_val - mean) / std
    x_test = (x_test - mean) / std

    y_train = label_binarizer.transform(y_train)
    y_val = label_binarizer.transform(y_val)

    print("x_train.shape", x_train.shape, "y_train.shape", y_train.shape)
    print("x_val.shape", x_val.shape, "y_val.shape", y_val.shape)

    return x_train, y_train, x_val, y_val, x_test, test_index, label_binarizer
Example #5
    def __init__(self, config):
        self.config = config
        dataset = load_dataset(config)
        # sq_dataset = load_simple_questions_dataset(config)
        (train_data, embed_mat, word_to_id,
         id_to_word) = (dataset.train, dataset.embd_mat, dataset.word2idx,
                        dataset.idx2word)
        # self.id_to_word = {i: w for w, i in word_to_id.items()}
        self.id_to_word = id_to_word
        self.word_to_id = word_to_id
        # Generate input
        train_input = InputProducer(data=train_data,
                                    word_to_id=word_to_id,
                                    id_to_word=id_to_word,
                                    config=config)

        # Build model
        self.model = CtrlVAEModel(input_producer=train_input,
                                  embed_mat=embed_mat,
                                  config=config,
                                  is_train=FLAGS.is_train)

        # Supervisor & Session
        self.sv = tf.train.Supervisor(logdir=FLAGS.model_subdir,
                                      save_model_secs=config.save_model_secs)

        gpu_options = tf.GPUOptions(allow_growth=True)
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     gpu_options=gpu_options)

        self.sess = self.sv.PrepareSession(config=sess_config)
Example #6
def coco_collate_fn(data):

    # ########################
    # LOAD DATASET
    # ########################

    corpus, word_to_idx, idx_to_word, train_dataset = load_dataset()

    # ########################
    # GROUP BATCH
    # ########################

    data.sort(key=lambda x: len(x["data"]), reverse=True)
    captions = [
        torch.FloatTensor(string_to_tensor(sentence)) for sentence in data
    ]
    lengths = [len(cap) for cap in captions]
    labels = [
        torch.FloatTensor([word_to_idx[sentence['target']]])
        for sentence in data
    ]

    targets = torch.zeros(len(captions), max(lengths)).long()
    for i, cap in enumerate(captions):
        end = lengths[i]
        targets[i, :end] = cap[:end]

    return targets, torch.FloatTensor(labels).long(), lengths
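For context (not shown in the original), a collate function like the one above is normally handed to a PyTorch DataLoader. A minimal sketch, assuming train_dataset is whatever load_dataset() returns:

from torch.utils.data import DataLoader

corpus, word_to_idx, idx_to_word, train_dataset = load_dataset()
loader = DataLoader(train_dataset, batch_size=32, shuffle=True,
                    collate_fn=coco_collate_fn)
targets, labels, lengths = next(iter(loader))  # one batch, just to exercise the collate_fn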
Example #7
def main(model='cnn', input_var = T.tensor4('inputs'), target_var = T.ivector('targets'),  num_epochs=10, lrn_rate=0.00004):
    # Load the dataset
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()

    # Prepare Theano variables for inputs and targets
    #input_var  = T.tensor4('inputs')
    #target_var = T.ivector('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    if model == 'cnn':
        network = build_cnn(input_var)
    elif model.startswith('custom_cnn:'):
        depth, width, drop_in, drop_hid, box_size = model.split(':', 1)[1].split(',')
        print(box_size)
        network = build_custom_cnn(input_var, int(depth), int(width),
                                   float(drop_in), float(drop_hid), int(box_size))
    else:
        print("Unrecognized model type %r." % model)
        return
    
    network = train(network, num_epochs, lrn_rate, input_var, target_var,
                    X_train, y_train, X_val, y_val, X_test, y_test)

    return network
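A hypothetical invocation of main() showing the 'custom_cnn:' spec parsed above (depth, width, drop_in, drop_hid, box_size); the values are placeholders, not from the original:

# placeholder spec: depth=3, width=64, drop_in=0.2, drop_hid=0.5, box_size=32
network = main(model='custom_cnn:3,64,0.2,0.5,32', num_epochs=5)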
Example #8
def train_encoder(mdl, crit, optim, sch, stat):
    """Train REL or EXT model"""
    logger.info(f'*** Epoch {stat.epoch} ***')
    mdl.train()
    it = DataLoader(load_dataset(args.dir_data,
                                 'train'), args.model_type, args.batch_size,
                    args.max_ntokens_src, spt_ids_B, spt_ids_C, eos_mapping)
    for batch in it:
        _, logits = mdl(batch)
        mask_inp = utils.sequence_mask(batch.src_lens, batch.inp.size(1))
        loss = crit(logits, batch.tgt, mask_inp)
        loss.backward()
        stat.update(loss,
                    'train',
                    args.model_type,
                    logits=logits,
                    labels=batch.tgt)
        torch.nn.utils.clip_grad_norm_(mdl.parameters(), 5)
        optim.step()
        if stat.steps == 0:
            continue
        if stat.steps % args.log_interval == 0:
            stat.lr = optim.param_groups[0]['lr']
            stat.report()
            sch.step(stat.avg_train_loss)
        if stat.steps % args.valid_interval == 0:
            valid_ret(mdl, crit, optim, stat)
Example #9
def train(ctx, dataset_fpath, all_data, max_depth, model_fpath, name, test):

    if not os.path.isfile(dataset_fpath):
        logging.info('No dataset was provided, building with default settings')
        data.save_dataset(dataset_fpath)

    dataset = data.load_dataset(dataset_fpath, return_arrays=False)
    clf = model.REGISTRY[name](max_depth=max_depth)

    X_train, y_train = dataset['X_train'], dataset['y_train']
    X_test, y_test = dataset['X_test'], dataset['y_test']
    if all_data:
        X_train = np.concatenate((X_train, X_test), axis=0)
        y_train = np.concatenate((y_train, y_test), axis=0)

    clf.fit(X_train, y_train)

    model.save_model(clf, model_fpath)

    acc = clf.score(X_train, y_train)
    logging.info("Accuracy on training set: {}".format(acc))

    if test:
        acc = clf.score(X_test, y_test)
        logging.info("Accuracy on the test set: {}".format(acc))
Example #10
    def run_test_evaluation(self, wandb_runid=None):
        self.logger.info('running multiwoz evaluation')
        self.logger.info('generating responses')
        self.model.eval()
        dataset = load_dataset('multiwoz-2.1-test', use_goal=True)
        dataset = wrap_dataset_with_cache(dataset)
        responses, beliefs, gold_responses, delex_responses, delex_gold_responses = \
            generate_predictions(self.prediction_pipeline, dataset, 'test-predictions.txt')
        evaluator = MultiWozEvaluator(dataset,
                                      is_multiwoz_eval=True,
                                      logger=self.logger)
        success, matches, domain_results = evaluator.evaluate(beliefs,
                                                              delex_responses,
                                                              progressbar=True)
        self.logger.info('evaluation finished')
        self.logger.info('computing bleu')
        bleu = compute_bleu_remove_reference(responses, gold_responses)
        delex_bleu = compute_delexicalized_bleu(delex_responses,
                                                delex_gold_responses)
        self.logger.info(f'test bleu: {bleu:.4f}')
        self.logger.info(f'delex test bleu: {delex_bleu:.4f}')

        # We will use external run to run in a separate process
        if self.is_master():
            run = wandb.run
            shutil.copy('test-predictions.txt', run.dir)
        else:
            api = wandb.Api()
            run = api.run(self.wandb_runid)
            run.upload_file('test-predictions.txt')
        run.summary.update(
            dict(test_inform=matches,
                 test_success=success,
                 test_bleu=bleu,
                 test_delex_bleu=delex_bleu))
Example #11
def classify(vectors, args):
    if not os.path.isfile(args.classifydir + '_labels.txt'):
        return defaultdict(lambda: 0)
    X, Y = read_node_label(args.classifydir + '_labels.txt')

    #     print("Training classifier using {:.2f}% nodes...".format(args.train_percent * 100))
    clf = Classifier(vectors=vectors,
                     clf=LogisticRegression(solver="lbfgs", max_iter=4000))
    #     scores = clf.split_train_evaluate(X, Y, args.train_percent)
    features, labels, graph, idx_train, idx_val, idx_test = load_dataset(
        str(args.classifydir.split("/")[-1]))
    #     print(idx_train)
    #     print(type(idx_train))
    idx_train = list(idx_train)

    #     idx_val = list(idx_val)
    #     idx_val += list(idx_test)[:600]

    idx_test = list(idx_test)  #[600:]

    #     for i in idx_val:
    #         idx_train.append(i)

    #     idx_val = idx_val[400:]

    print("TRAINING SIZE", len(idx_train), "VALIDATION SIZE", len(idx_val),
          "TESTING SIZE: ", len(list(idx_test)))
    scores = clf.split_train_evaluate_idx(X, Y, idx_train, idx_val)

    # scores = clf.split_train_evaluate(X, Y, args.train_percent)
    test_scores = clf.split_train_evaluate_idx(X, Y, idx_train, idx_test)
    test_x.append(test_scores['macro'])
    print("micro:", test_scores['micro'], "macro:", test_scores['macro'])

    return scores
Example #12
def main(args):
    # logging
    if args.use_wandb:
        wandb.init(project="HW5-TextStyleTransfer", config=args)
        #wandb.config.update(vars(args))
        args = wandb.config
        print(args)
    
    train_iters, dev_iters, test_iters, vocab = load_dataset(args)
    print('Vocab size:', len(vocab))
    model_F = StyleTransformer(args, vocab).to(args.device)
    model_D = Discriminator(args, vocab).to(args.device)
    print(args.discriminator_method)

    if os.path.isfile(args.preload_F):
        temp = torch.load(args.preload_F)
        model_F.load_state_dict(temp)
    if os.path.isfile(args.preload_D):
        temp = torch.load(args.preload_D)
        model_D.load_state_dict(temp)
    
    if args.do_train:
        train(args, vocab, model_F, model_D, train_iters, dev_iters, test_iters)
    if args.do_test:
        dev_eval(args, vocab, model_F, test_iters, 0.5)
Example #13
def train():
    model = create_model()
    model.summary()
    model.add(Flatten())

    cars, notcars = load_dataset()
    print("number of cars: ", len(cars), ", number of notcars: ", len(notcars))
    filenames = []
    filenames.extend(cars)
    filenames.extend(notcars)
    X = np.array(read_images(filenames))
    Y = np.concatenate([np.ones(len(cars)), np.zeros(len(notcars))])
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=63)

    model_file = "model_-{epoch:02d}-{val_loss:.2f}.h5"
    cb_checkpoint = ModelCheckpoint(filepath=model_file, verbose=1)
    cb_early_stopping = EarlyStopping(patience=2)
    optimizer = Adam()
    model.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

    model.fit(X_train,
              Y_train,
              batch_size=128,
              epochs=20,
              verbose=2,
              validation_data=(X_test, Y_test),
              callbacks=[cb_checkpoint, cb_early_stopping])
    # Temporary fix - AttributeError: 'NoneType' object has no attribute 'TF_NewStatus'
    K.clear_session()
Example #14
def run(audio_db, prepared_dataset=None, new_song=None):
    # prepare data
    training_set, test_set = load_dataset(audio_db, new_song)
    if prepared_dataset:
        training_set = pickle.load(prepared_dataset)
        shuffle(training_set)
    print 'Train set: ' + repr(len(training_set))
    print 'Test set: ' + repr(len(test_set))
    # generate predictions
    predictions = []
    if not new_song:
        for x in range(len(test_set)):
            neighbors = get_neighbors(training_set, test_set[x], K_POINTS)
            result = predict(neighbors)
            predictions.append(result)
            print('> predicted=' + repr(result) + ', actual=' +
                  repr(test_set[x][-1]))
        accuracy = get_accuracy(test_set, predictions)  # check accuracy
        print 'Accuracy: ' + repr(accuracy)
        return dump_best_accuracy(
            dataset=training_set, accuracy=accuracy
        )  # Returns True If accuracy is bigger than threshold else False
    else:
        neighbors = get_neighbors(training_set, test_set[0], K_POINTS)
        return predict(neighbors)
Example #15
def q14(never_used_param):
	n_params = 2
	N = 1000
	X, y = load_dataset(N, f, n_params, noise_level=0.1)
	lr = linreg.LinearRegression()
	lr.fit(transform(X), y)
	return lr.w
Example #16
def evaluate(args):
    paddle.set_device(args.device)

    # create dataset.
    test_ds = load_dataset(datafiles=(os.path.join(args.data_dir, 'test.tsv')))
    word_vocab = load_vocab(os.path.join(args.data_dir, 'word.dic'))
    label_vocab = load_vocab(os.path.join(args.data_dir, 'tag.dic'))
    # q2b.dic maps full-width (DBC) characters to their half-width (SBC) equivalents
    normlize_vocab = load_vocab(os.path.join(args.data_dir, 'q2b.dic'))

    trans_func = partial(
        convert_example,
        max_seq_len=args.max_seq_len,
        word_vocab=word_vocab,
        label_vocab=label_vocab,
        normlize_vocab=normlize_vocab)
    test_ds.map(trans_func)

    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=0, dtype='int64'),  # word_ids
        Stack(dtype='int64'),  # length
        Pad(axis=0, pad_val=0, dtype='int64'),  # label_ids
    ): fn(samples)

    # Create sampler for dataloader
    test_sampler = paddle.io.BatchSampler(
        dataset=test_ds,
        batch_size=args.batch_size,
        shuffle=False,
        drop_last=False)
    test_loader = paddle.io.DataLoader(
        dataset=test_ds,
        batch_sampler=test_sampler,
        return_list=True,
        collate_fn=batchify_fn)

    # Define the model network and metric evaluator
    model = BiGruCrf(args.emb_dim, args.hidden_size,
                     len(word_vocab), len(label_vocab))
    chunk_evaluator = ChunkEvaluator(label_list=label_vocab.keys(), suffix=True)

    # Load the model and start predicting
    model_dict = paddle.load(args.init_checkpoint)
    model.load_dict(model_dict)

    model.eval()
    chunk_evaluator.reset()
    for batch in test_loader:
        token_ids, length, labels = batch
        preds = model(token_ids, length)
        num_infer_chunks, num_label_chunks, num_correct_chunks = chunk_evaluator.compute(
            length, preds, labels)
        chunk_evaluator.update(num_infer_chunks.numpy(),
                               num_label_chunks.numpy(),
                               num_correct_chunks.numpy())
        precision, recall, f1_score = chunk_evaluator.accumulate()
    print("eval precision: %f, recall: %f, f1: %f" %
          (precision, recall, f1_score))
Example #17
def train_node2vec(dataset_name,
                   param_return,
                   param_in_out,
                   no_rw=5,
                   length_rw=5):
    """
    Trains the node2vec embedding.
    :param dataset_name:    string for name of dataset
    :param param_return: Bias of going back to the old node (p)
    :param param_in_out: Bias of moving forward to a new node (q)
    :param no_rw: number of random walks, default is 5
    :param length_rw: length of random walks, default is 5

    :return: the trained embedding matrix
    """

    logging.info("Training the node2vec embedding.")
    dataset = load_dataset(dataset_name)

    # we assume one graph per dataset for this assignment
    graph = dataset[0]
    number_nodes = graph.number_of_nodes()

    # TODO test parameters
    early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='loss',
                                                               patience=2)

    logging.info("Computing random walks.")
    walks, walks_neg = compute_random_walks(graph, param_return, param_in_out,
                                            no_rw)

    logging.info("Compiling model.")
    node_input = layers.Input(shape=1, name="input_node")
    rw_input = layers.Input(shape=length_rw, name="input_random_walk")
    rw_neg_input = layers.Input(shape=length_rw, name="input_negative_sample")

    embed_model = get_model([node_input, rw_input, rw_neg_input], number_nodes)
    embed_model.compile(optimizer=tf.keras.optimizers.Adam(lr=config.EMBED_LR),
                        loss=custom_loss)

    logging.info("Training.")
    node_input = np.asarray(np.arange(number_nodes).tolist() * no_rw)
    dummy_labels = np.zeros([number_nodes * no_rw, length_rw, 1],
                            dtype='float64')
    embed_model.fit(
        x=[node_input,
           walks.astype(np.float64),
           walks_neg.astype(np.float64)],
        y=dummy_labels,
        batch_size=config.EMBED_BATCH_SIZE,
        epochs=config.EMBED_EPOCH_MAX,
        verbose=2,
        callbacks=[early_stopping_callback])

    embed_layer = embed_model.get_layer('embedding')
    embed_weights = embed_layer.get_weights()[0]

    return embed_weights
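A hypothetical call matching the signature above; the dataset name and the p/q values are placeholders, not from the original:

embedding_matrix = train_node2vec("cora", param_return=1.0, param_in_out=0.5,
                                  no_rw=5, length_rw=5)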
Example #18
def load_datasets(months_to_live_start = 1, months_to_live_stop = 15, binarize_categorical = True):
	Xs = {}
	Ys = {}
	for i in xrange(months_to_live_start, months_to_live_stop + 1):
		print "Loading dataset for %d months to live" % i 
		X, Y = data.load_dataset(months_to_live = i, binarize_categorical = binarize_categorical)
		Xs[i] = X
		Ys[i] = Y
	return Xs, Ys
Example #19
def main():
    config = Config()
    train_iters, dev_iters, test_iters, vocab = load_dataset(config)
    print('Vocab size:', len(vocab))
    model_F = StyleTransformer(config, vocab).to(config.device)
    model_D = Discriminator(config, vocab).to(config.device)
    print(config.discriminator_method)

    train(config, vocab, model_F, model_D, train_iters, dev_iters, test_iters)
Example #20
def q13(never_used_param):
	n_params = 2
	N = 1000
	X, y = load_dataset(N, f, n_params, noise_level=0.1)
	lr = linreg.LinearRegression()
	lr.fit(X, y)
	err = np.sum(np.vectorize(sign)(lr.predict(X)) != y)
	err_rate = err * 1.0 / N
	return err_rate
Example #21
def main():
    config = Config()
    train_iters, dev_iters, test_iters, vocab = load_dataset(config)
    print('Vocab size:', len(vocab))
    model_F = StyleTransformer(config, vocab).to(config.device)
    model_F.load_state_dict(torch.load('./save/Jun11115103/ckpts/9925_F.pth'))
    model_D = Discriminator(config, vocab).to(config.device)
    print(config.discriminator_method)

    train(config, vocab, model_F, model_D, train_iters, dev_iters, test_iters)
Example #22
def test():
    config = Config()
    train_iters, dev_iters, test_iters, vocab = load_dataset(config)

    step = 125
    model_F = StyleTransformer(config, vocab).to(config.device)
    model_F.load_state_dict(
        torch.load(f'./save/Jun15042756/ckpts/{step}_F.pth'))

    auto_eval(config, vocab, model_F, test_iters, 1, step)
Example #23
def trainModel(num_epochs, embedding):
	'''
		Trains the attention model for the specified number of epochs
		using the specified embedding.

		Arguments:
			num_epochs: The number of training epochs
			embedding:  The embedding to use for training
	'''

	vocab_size, embed_size = embedding.vectors.shape
	
	train_ds = data.load_dataset("data/train.csv", encoding=embedding.key_to_index.get)
	train_loader = DataLoader(train_ds, batch_size=10, collate_fn=data.collate, shuffle=True)
	test_ds = data.load_dataset("data/test.csv", encoding=embedding.key_to_index.get)
	test_loader = DataLoader(test_ds, batch_size=10, collate_fn=data.collate, shuffle=False)
	
	model = md.ConvAttModel(vocab_size=vocab_size, embed_size=embed_size, embedding=embedding.vectors)
	md.train(model, train_loader, test_loader, num_epochs)
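The embedding argument above exposes .vectors and .key_to_index, which matches gensim's KeyedVectors API. A minimal sketch of a call under that assumption (the vector set is a placeholder):

import gensim.downloader

embedding = gensim.downloader.load("glove-wiki-gigaword-100")  # any KeyedVectors works
trainModel(num_epochs=5, embedding=embedding)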
Example #24
def main():
    import yaml

    config = yaml.load(open("const.yaml", "r"), Loader=yaml.BaseLoader)
    path, dataset_name = config["data"]["base"], config["data"]["dataset"]
    path = download(path, dataset_name)

    train_dataset = load_dataset(path,
                                 256,
                                 256,
                                 "train",
                                 jitter=30,
                                 mirror=True)
    train_dataset = train_dataset.cache().shuffle(400).batch(40).repeat()
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    test_dataset = load_dataset(path, 256, 256, "test")
    test_dataset = test_dataset.batch(50)
    fit(train_dataset, test_dataset, steps=10, preview=10)
Example #25
    def load_dataset(self, hparams):
        dataset = load_dataset(
            hparams.train_dataset_folder,
            image_size=hparams.image_size,
            nb_channels=hparams.nb_channels,
            dataset_type=hparams.dataset_type,
        )
        if hparams.nb_examples is not None:
            dataset = Shuffle(dataset)
            dataset = SubSet(dataset, hparams.nb_examples)
        return dataset
Example #26
def train(run_start=1):
    start_t = time()
    print('\nTraining')

    writer = SummaryWriter('runs_new/' + RUN_NAME)

    # Load data
    datasets = load_dataset('all')
    s_train = datasets['train']
    s_db = datasets['db']

    # Load NN
    net = Net().double()
    # Resume training if not started from 1
    if run_start != 1:
        net.load()
    optimizer = optim.Adam(net.parameters(), lr=1e-3)

    loss_sum = 0
    for run in range(run_start, RUNS + 1):
        # Save state_dict
        net.store()

        batch = generate_triplet_batch(s_train, s_db, BATCH_SIZE)
        results = list()

        optimizer.zero_grad()

        for i in batch:
            results.append(net(i[0].view(1, 3, 64, 64)))

        loss = l_triplets(results, batch) + l_pairs(results)
        # loss = l_triplets(results) + l_pairs(results)
        loss_sum += float(loss)

        if (run * BATCH_SIZE) % 100 == 0:
            loss_sum = loss_sum / 100
            print('Run: ', run * BATCH_SIZE, '\tLoss Average: ', loss_sum)
            writer.add_scalar(tag='avg_training_loss',
                              scalar_value=loss_sum,
                              global_step=run * BATCH_SIZE)
            loss_sum = 0

        if (run * BATCH_SIZE) % 10000 == 0:
            test(run=run * BATCH_SIZE,
                 s_test=datasets['test'],
                 s_db=datasets['db'],
                 writer=writer)

        loss.backward()
        optimizer.step()

    print('Finished in ', str(datetime.timedelta(seconds=time() - start_t)),
          's\n')
Example #27
def enumerate_multiwoz_invalid_indices(dataset_name,
                                       logger,
                                       correct_requestables=False):
    dataset = load_dataset(dataset_name, use_goal=True)
    dataset = wrap_dataset_with_cache(dataset)
    evaluator = MultiWozEvaluator(dataset,
                                  logger=logger,
                                  is_multiwoz_eval=True)
    responses = (item.response for item in dataset)
    beliefs = (evaluator.belief_parser(item.belief) for item in dataset)
    dialogues = evaluator.pack_dialogues(dataset, beliefs, responses)
    successes, matches = 0, 0
    stats = tuple(Counter() for _ in range(3))
    domain_total, domain_match, domain_success = stats
    total = 0

    offset = 0
    with tqdm(total=len(dataset),
              desc='identifying bad dialogues') as progress:
        for idx, (items, goal, beliefs, responses,
                  booked_domains) in enumerate(dialogues):
            goal, real_requestables = evaluator._get_goal_and_requestables(
                items[-1].raw_belief, goal)
            goal = fix_goal(goal, beliefs[-1])
            provided_requestables, venue_offered = evaluator._get_requestables_and_venues(
                beliefs, responses, booked_domains)
            success, match = evaluator._evaluate_generated_dialogue(
                real_requestables, provided_requestables, venue_offered, goal,
                stats)
            if match != 1 or (success != 1 and correct_requestables):
                for i in range(offset, offset + len(items)):
                    yield f'{i}'
            elif len(set(map(format_belief, beliefs))) == 1 and len(items) > 1:
                match, success = 0, 0
                for i in range(offset, offset + len(items)):
                    yield f'{i}'

            successes += success
            matches += match
            total += 1
            offset += len(items)
            progress.update(len(items))

        domain_results = dict()
        for key in domain_total.keys():
            domain_results[key] = domain_match[key] / float(domain_total[key]), \
                domain_success[key] / float(domain_total[key])

        match, success = matches / float(total), successes / float(total)
        logger.info(f'match: {match:.4f}, success: {success:.4f}')
        for domain, (match, success) in domain_results.items():
            logger.info(
                f'   - domain: {domain}, match: {match:.4f}, success: {success:.4f}'
            )
Example #28
def run_train():
    cars, notcars = load_dataset()
    #cars, notcars = load_smallset()
    feature_parameter = selected_feature_parameter()

    car_features = extract_features(cars, feature_parameter=feature_parameter)
    notcar_features = extract_features(notcars,
                                       feature_parameter=feature_parameter)
    clf, X_scaler = train_classifier(car_features=car_features,
                                     notcar_features=notcar_features)
    save_classifier(clf=clf, scaler=X_scaler, filename="classifier.p")
Example #29
    def _get_wrapped_dataset(self, set_) -> InputData:
        ds = load_dataset(
            self.path,
            self.context.get_data_config()["height"],
            self.context.get_data_config()["width"],
            set_,
            self.context.get_hparam("jitter"),
            self.context.get_hparam("mirror"),
        )
        ds = self.context.wrap_dataset(ds)
        return ds
Example #30
def train(dataset, logs):
    log_dir = '{}/iter_{:%Y%m%dT%H%M%S}'.format(logs, datetime.datetime.now())
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    vocab, train_tuple, val_tuple = generate_train_data(
        load_dataset(dataset), log_dir, NUM_TRAINING_SAMPLES)
    training_seq, validation_seq = generate_train_sequences(
        train_tuple, val_tuple)
    model = create_model(log_dir, val_tuple, vocab)
    model.train(training_seq, validation_seq)
Example #31
def main():
    setup_logging()
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset')
    parser.add_argument('--backtranslations', default=None)
    args = parser.parse_args()
    dataset = load_dataset(args.dataset)
    transform = load_backtranslation_transformation(args.backtranslations
                                                    or args.dataset)
    validate_coverage(dataset, transform.dictionary, logger)
Example #32
    def __init__(self, dataset, fitness_metric):
        """
            Creates the Evaluator instance and loads the dataset.

            Parameters
            ----------
            dataset : str
                dataset to be loaded
            fitness_metric
                metric used to compute the fitness of candidate solutions
        """

        self.dataset = load_dataset(dataset)
        self.fitness_metric = fitness_metric
Example #33
def start():
    movies = load_dataset()
    model = MatrixPreferenceDataModel(movies['data'])
    option = int(input("Enter: \n 1 for User Based Recommender \n 2 for Item Based Recommender \n"))
    if option != 1 and option != 2:
        print("Invalid Input")
        return
    if option == 1:
        similarity = UserSimilarity(model, cosine_distances)
        neighborhood = NearestNeighborsStrategy()
        recsys = UserBasedRecommender(model, similarity, neighborhood)

    if option == 2:
        similarity = ItemSimilarity(model, cosine_distances)
        neighborhood = ItemsNeighborhoodStrategy()
        recsys = ItemBasedRecommender(model, similarity, neighborhood)

    evaluator = Evaluator()
    all_scores = evaluator.evaluate(recsys, permutation=False)
    print(all_scores)
Example #34
def main(num_epochs=NUM_EPOCHS):
    print("Building network ...")


    l_in = lasagne.layers.InputLayer((None, MAX_LENGTH, 3))
    batchsize, seqlen, _ = l_in.input_var.shape

    l_forward = lasagne.layers.LSTMLayer(
        l_in, N_HIDDEN, grad_clipping=GRAD_CLIP,
        nonlinearity=lasagne.nonlinearities.tanh)

    l_backward = lasagne.layers.LSTMLayer(
        l_in, N_HIDDEN, grad_clipping=GRAD_CLIP,
        nonlinearity=lasagne.nonlinearities.tanh, backwards=True)

    l_recurrent = lasagne.layers.ElemwiseMergeLayer([l_forward, l_backward], T.mul)

    softmax = lasagne.nonlinearities.softmax

    l_reshape = lasagne.layers.ReshapeLayer(l_recurrent,(-1, N_HIDDEN))

    l_drop_out = lasagne.layers.DropoutLayer(l_reshape, p=0.95)

    l_dense = lasagne.layers.DenseLayer(l_drop_out, num_units=1, nonlinearity=lasagne.nonlinearities.tanh)

    l_drop_out_2 = lasagne.layers.DropoutLayer(l_dense, p=0.95)

    #l_drop_out_2 = lasagne.layers.DropoutLayer(l_reshape, p=0.5)

    l_softmax = lasagne.layers.DenseLayer(l_drop_out_2, num_units=2, nonlinearity = softmax)

    l_out = lasagne.layers.ReshapeLayer(l_softmax, (batchsize, seqlen, 2)) 

    # Now, we'll concatenate the outputs to combine them.
    #l_sum = lasagne.layers.ConcatLayer([l_forward, l_backward], 2)

    #l_shp = lasagne.layers.ReshapeLayer(l_sum, (-1, N_HIDDEN))

    # Our output layer is a simple dense connection, with 1 output unit
    #l_final = lasagne.layers.DenseLayer(l_shp, num_units=1, nonlinearity=lasagne.nonlinearities.tanh)
    
    #l_out = lasagne.layers.ReshapeLayer(l_final, (batchsize, seqlen, 1))

    target_values = T.tensor3('target_output')

    # lasagne.layers.get_output produces a variable for the output of the net
    network_output = lasagne.layers.get_output(l_out)

    # The value we care about is the final value produced  for each sequence
    #predicted_values = T.argmax(network_output, axis = 2, keepdims = True)
    predicted_values = network_output

    # Our cost will be mean-squared error
    cost = T.mean((T.argmax(predicted_values, axis = 2, keepdims = True) - target_values)**2)
    #cost = lasagne.objectives.squared_error(T.argmax(predicted_values, axis = 2)+1, target_values).mean()
    #cost = cost.mean()

    acc = T.mean(T.eq(T.argmax(predicted_values, axis = 2, keepdims = True), target_values),
                      dtype=theano.config.floatX)

    # Retrieve all parameters from the network
    all_params = lasagne.layers.get_all_params(l_out)

    # Compute SGD updates for training
    print("Computing updates ...")
    updates = lasagne.updates.nesterov_momentum(cost, all_params, learning_rate=LEARNING_RATE)

    # Theano functions for training and computing cost
    print("Compiling functions ...")
    train = theano.function([l_in.input_var, target_values],
                            cost, updates=updates)
    compute_cost = theano.function(
        [l_in.input_var, target_values], cost)
    compute_acc = theano.function(
        [l_in.input_var, target_values], acc)

    get_out = T.argmax(predicted_values, axis = 2, keepdims = True)
    get_prediction = theano.function([l_in.input_var], get_out)
    get_prediction_2 = theano.function([l_in.input_var], predicted_values)

    # We'll use this "validation set" to periodically check progress
    X_train, y_train, X_val, y_val, X_test, y_test = dt.load_dataset(BATCH_SIZE, row_count, column_count, plane_count, DEBUG)
 
    print("Training ...")
    #print(get_prediction(X_train[0:1]))

    try:
        index = 0 #*len(dt.labels_rev)
        dt.saveImage(y_test[0], "results/y_GT.png",row_count, column_count,  plane_count)
        for epoch in range(num_epochs):
            X = X_train[EPOCH_SIZE*epoch:EPOCH_SIZE*(epoch+1)]
            y = y_train[EPOCH_SIZE*epoch:EPOCH_SIZE*(epoch+1)]
            train(X, y)

            cost_val = compute_cost(X_val, y_val)
            cost_test = compute_acc(X_test, y_test)*100
            #print(y_test[0]) 
            #print(get_prediction(X_test)[0])
            #print(get_prediction_2(X_test)[0]) 
            print("Epoch {} validation cost = {}  test acc = {} %".format(epoch, cost_val, cost_test))

            dt.saveImage(get_prediction(X_test)[0], "results/y_output_{}.png".format(epoch), row_count, column_count,  plane_count, True)

        dt.saveImage(get_prediction(X_test)[0], "results/y_output.png", row_count, column_count,  plane_count, True)

    except KeyboardInterrupt:
        pass
Example #35
def main():
    # Supported preprocessing/network list
    method_list = ['nfp', 'ggnn', 'schnet', 'weavenet', 'rsgcn', 'relgcn',
                   'relgat']
    label_names = D.get_tox21_label_names()
    iterator_type = ['serial', 'balanced']

    parser = argparse.ArgumentParser(
        description='Multitask Learning with Tox21.')
    parser.add_argument('--method', '-m', type=str, choices=method_list,
                        default='nfp', help='graph convolution model to use '
                        'as a predictor.')
    parser.add_argument('--label', '-l', type=str, choices=label_names,
                        default='', help='target label for logistic '
                        'regression. Use all labels if this option '
                        'is not specified.')
    parser.add_argument('--iterator-type', type=str, choices=iterator_type,
                        default='serial', help='iterator type. If `balanced` '
                        'is specified, data is sampled to take the same number of '
                        'positive/negative labels during training.')
    parser.add_argument('--eval-mode', type=int, default=1,
                        help='Evaluation mode.'
                        '0: only binary_accuracy is calculated.'
                        '1: binary_accuracy and ROC-AUC score is calculated')
    parser.add_argument('--conv-layers', '-c', type=int, default=4,
                        help='number of convolution layers')
    parser.add_argument('--batchsize', '-b', type=int, default=32,
                        help='batch size')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID to use. Negative value indicates '
                        'not to use GPU and to run the code in CPU.')
    parser.add_argument('--out', '-o', type=str, default='result',
                        help='path to output directory')
    parser.add_argument('--epoch', '-e', type=int, default=10,
                        help='number of epochs')
    parser.add_argument('--unit-num', '-u', type=int, default=16,
                        help='number of units in one layer of the model')
    parser.add_argument('--resume', '-r', type=str, default='',
                        help='path to a trainer snapshot')
    parser.add_argument('--frequency', '-f', type=int, default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--protocol', type=int, default=2,
                        help='protocol version for pickle')
    parser.add_argument('--model-filename', type=str, default='classifier.pkl',
                        help='file name for pickled model')
    parser.add_argument('--num-data', type=int, default=-1,
                        help='Number of data points to be parsed from the parser. '
                             '-1 indicates to parse all data.')
    args = parser.parse_args()

    method = args.method
    if args.label:
        labels = args.label
        class_num = len(labels) if isinstance(labels, list) else 1
    else:
        labels = None
        class_num = len(label_names)

    # Dataset preparation
    train, val, _ = data.load_dataset(method, labels, num_data=args.num_data)

    # Network
    predictor_ = predictor.build_predictor(
        method, args.unit_num, args.conv_layers, class_num)

    iterator_type = args.iterator_type
    if iterator_type == 'serial':
        train_iter = I.SerialIterator(train, args.batchsize)
    elif iterator_type == 'balanced':
        if class_num > 1:
            raise ValueError('BalancedSerialIterator can be used with only one-'
                             'label classification, please specify the label to '
                             'be predicted by the --label option.')
        train_iter = BalancedSerialIterator(
            train, args.batchsize, train.features[:, -1], ignore_labels=-1)
        train_iter.show_label_stats()
    else:
        raise ValueError('Invalid iterator type {}'.format(iterator_type))
    val_iter = I.SerialIterator(val, args.batchsize,
                                repeat=False, shuffle=False)

    classifier = Classifier(predictor_,
                            lossfun=F.sigmoid_cross_entropy,
                            metrics_fun=F.binary_accuracy,
                            device=args.gpu)

    optimizer = O.Adam()
    optimizer.setup(classifier)

    updater = training.StandardUpdater(
        train_iter, optimizer, device=args.gpu, converter=concat_mols)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    trainer.extend(E.Evaluator(val_iter, classifier,
                               device=args.gpu, converter=concat_mols))
    trainer.extend(E.LogReport())

    eval_mode = args.eval_mode
    if eval_mode == 0:
        trainer.extend(E.PrintReport([
            'epoch', 'main/loss', 'main/accuracy', 'validation/main/loss',
            'validation/main/accuracy', 'elapsed_time']))
    elif eval_mode == 1:
        train_eval_iter = I.SerialIterator(train, args.batchsize,
                                           repeat=False, shuffle=False)
        trainer.extend(ROCAUCEvaluator(
            train_eval_iter, classifier, eval_func=predictor_,
            device=args.gpu, converter=concat_mols, name='train',
            pos_labels=1, ignore_labels=-1, raise_value_error=False))
        # extension name='validation' is already used by `Evaluator`,
        # instead extension name `val` is used.
        trainer.extend(ROCAUCEvaluator(
            val_iter, classifier, eval_func=predictor_,
            device=args.gpu, converter=concat_mols, name='val',
            pos_labels=1, ignore_labels=-1))
        trainer.extend(E.PrintReport([
            'epoch', 'main/loss', 'main/accuracy', 'train/main/roc_auc',
            'validation/main/loss', 'validation/main/accuracy',
            'val/main/roc_auc', 'elapsed_time']))
    else:
        raise ValueError('Invalid accfun_mode {}'.format(eval_mode))
    trainer.extend(E.ProgressBar(update_interval=10))
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(E.snapshot(), trigger=(frequency, 'epoch'))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()

    config = {'method': args.method,
              'conv_layers': args.conv_layers,
              'unit_num': args.unit_num,
              'labels': args.label}

    with open(os.path.join(args.out, 'config.json'), 'w') as o:
        o.write(json.dumps(config))

    classifier.save_pickle(os.path.join(args.out, args.model_filename),
                           protocol=args.protocol)
Example #36
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


program = "detect.py"
version = "0.1"
usage = "Usage: %prog [OPTION]... [REST]"
description = "Instrument detection using scikit-learn"

parser = optparse.OptionParser(usage=usage, version=version, description=description, prog=program)
parser.add_option("-d", "--directory", dest="directory", action="store",
                  help="download data to this directory" )
(options, args) = parser.parse_args()

if not options.directory:
    print "set -d <directory> to save data to"
    sys.exit(1)
logger.info('loading dataset from directory: %s' % options.directory)

DIRECTORY = options.directory
dataset  = data.load_dataset(DIRECTORY)
target_names = [item['target'] for item in dataset]
unique_targets = np.unique(target_names)
logger.info('%d targets found: %s' % (len(unique_targets), unique_targets))




Example #37
def main():
    parser = argparse.ArgumentParser(
        description='Predict with a trained model.')
    parser.add_argument('--in-dir', '-i', type=str, default='result',
                        help='Path to the result directory of the training '
                        'script.')
    parser.add_argument('--batchsize', '-b', type=int, default=128,
                        help='batch size')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID to use. Negative value indicates '
                        'not to use GPU and to run the code in CPU.')
    parser.add_argument('--model-filename', type=str, default='classifier.pkl',
                        help='file name for pickled model')
    parser.add_argument('--num-data', type=int, default=-1,
                        help='Number of data points to be parsed from the parser. '
                             '-1 indicates to parse all data.')
    args = parser.parse_args()

    with open(os.path.join(args.in_dir, 'config.json'), 'r') as i:
        config = json.loads(i.read())

    method = config['method']
    labels = config['labels']

    _, test, _ = data.load_dataset(method, labels, num_data=args.num_data)
    y_test = test.get_datasets()[-1]

    # Load pretrained model
    clf = Classifier.load_pickle(
        os.path.join(args.in_dir, args.model_filename),
        device=args.gpu)  # type: Classifier

    # ---- predict ---
    print('Predicting...')

    # We need to feed only input features `x` to `predict`/`predict_proba`.
    # This converter extracts only inputs (x1, x2, ...) from the features which
    # consist of input `x` and label `t` (x1, x2, ..., t).
    def extract_inputs(batch, device=None):
        return concat_mols(batch, device=device)[:-1]

    def postprocess_pred(x):
        x_array = cuda.to_cpu(x.data)
        return numpy.where(x_array > 0, 1, 0)
    y_pred = clf.predict(test, converter=extract_inputs,
                         postprocess_fn=postprocess_pred)
    y_proba = clf.predict_proba(test, converter=extract_inputs,
                                postprocess_fn=F.sigmoid)

    # `predict` method returns the prediction label (0: non-toxic, 1:toxic)
    print('y_pred.shape = {}, y_pred[:5, 0] = {}'
          .format(y_pred.shape, y_pred[:5, 0]))
    # `predict_proba` method returns the probability to be toxic
    print('y_proba.shape = {}, y_proba[:5, 0] = {}'
          .format(y_proba.shape, y_proba[:5, 0]))
    # --- predict end ---

    if y_pred.ndim == 1:
        y_pred = y_pred[:, None]

    if y_pred.shape != y_test.shape:
        raise RuntimeError('The shape of the prediction result array and '
                           'that of the ground truth array do not match. '
                           'Contents of the input directory may be corrupted '
                           'or modified.')

    statistics = []
    for t, p in six.moves.zip(y_test.T, y_pred.T):
        idx = t != -1
        n_correct = (t[idx] == p[idx]).sum()
        n_total = len(t[idx])
        accuracy = float(n_correct) / n_total
        statistics.append([n_correct, n_total, accuracy])

    print('{:>6} {:>8} {:>8} {:>8}'
          .format('TaskID', 'Correct', 'Total', 'Accuracy'))
    for idx, (n_correct, n_total, accuracy) in enumerate(statistics):
        print('task{:>2} {:>8} {:>8} {:>8.4f}'
              .format(idx, n_correct, n_total, accuracy))

    prediction_result_file = 'prediction.npz'
    print('Save prediction result to {}'.format(prediction_result_file))
    numpy.savez_compressed(prediction_result_file, y_pred)

    # --- evaluate ---
    # To calc loss/accuracy, we can use `Evaluator`, `ROCAUCEvaluator`
    print('Evaluating...')
    test_iterator = SerialIterator(test, 16, repeat=False, shuffle=False)
    eval_result = Evaluator(
        test_iterator, clf, converter=concat_mols, device=args.gpu)()
    print('Evaluation result: ', eval_result)
    rocauc_result = ROCAUCEvaluator(
        test_iterator, clf, converter=concat_mols, device=args.gpu,
        eval_func=clf.predictor, name='test', ignore_labels=-1)()
    print('ROCAUC Evaluation result: ', rocauc_result)
    with open(os.path.join(args.in_dir, 'eval_result.json'), 'w') as f:
        json.dump(rocauc_result, f)
Example #38
		mask = np.array(~(Y_pred.isnull()))
		print expert.strip(), "n =", np.sum(mask)
		Y_pred_subset = np.array(Y_pred[mask].astype('float'))
		print "-- %0.4f" % error(Y[mask], Y_pred_subset, deceased[mask])
		Y_expert_combined[mask] += Y_pred_subset
		Y_expert_count[mask] += 1

	combined_mask = Y_expert_count > 0
	Y_expert_combined = Y_expert_combined[combined_mask]
	Y_expert_combined /= Y_expert_count[combined_mask]
	return error(Y[combined_mask], Y_expert_combined, deceased[combined_mask])


if __name__ == '__main__':

	X, Y, deceased, experts, test_mask = data.load_dataset(binarize_categorical = True)

	

	print "Data shape", X.shape

	print "---"
	print "Average prediction error = %0.4f" % average_expert_error(Y, experts, deceased)
	print "---"

	
	shuffle_data = False
	if shuffle_data:
		random_index = np.arange(len(Y))
		np.random.shuffle(random_index)
		X = np.array(X)
Example #39
def train(*, dataset='mnist'):
    z1 = 100
    z2 = 512
    batch_size = 64
    lr = 0.1

    dataset = load_dataset(dataset, split='train')
    x0, _ = dataset[0]
    c, h, w = x0.size()
    dataloader = torch.utils.data.DataLoader(
        dataset, 
        batch_size=batch_size,
        shuffle=True, 
        num_workers=1
    )

    w1 = torch.rand(w*h*c, z1).cuda()
    w1 = Variable(w1, requires_grad=True)
    xavier_uniform(w1.data)
    """
    w1_2 = torch.rand(z1, z2)
    w1_2 = Variable(w1_2, requires_grad=True)
    xavier_uniform(w1_2.data)
    w1_2 = w1_2.cuda()
        

    wx_2 = torch.rand(w*h*c, z2)
    wx_2 = Variable(wx_2, requires_grad=True)
    xavier_uniform(wx_2.data)
    wx_2 = wx_2.cuda()
    """
    
    bias = torch.zeros(w*h*c).cuda()
    bias = Variable(bias, requires_grad=True)

    print(w1.is_leaf, bias.is_leaf)

    grads = {}
    momentum = 0.9
    def save_grad(v):
        def hook(grad):
            v.grad = grad
            if not hasattr(v, 'mem'):
                v.mem = 0.0
            v.mem = v.mem * momentum + v.grad.data * (1 - momentum)
        return hook
    
    #params = [w1, w1_2, wx_2, bias]
    params = [w1, bias]
    optim = torch.optim.Adadelta(params, lr=0.1)
    #for p in params:
    #    p.register_hook(save_grad(p))
    
    gamma = 5.0
    nb_updates = 0
    for _ in range(1000):
        for X, y in dataloader:
            optim.zero_grad()
            X = Variable(X)
            #w2 = torch.matmul(w1, w1_2)
            X = X.cuda()
            X = X.view(X.size(0), -1)
            """
            a2 = torch.matmul(X, wx_2)
            a2 = a2 * (a2 > 0.8).float()
            Xrec = torch.matmul(a2, w2.transpose(0, 1)) + bias
            Xrec = torch.nn.Sigmoid()(Xrec)
            """
            hid = torch.matmul(X, w1)
            hid = hid * (hid > 1.0).float()
            Xrec = torch.matmul(hid, w1.transpose(1, 0).contiguous()) + bias
            Xrec = torch.nn.Sigmoid()(Xrec)
            e1 = ((Xrec - X)**2).sum(1).mean()
            e2 = e1
            e3 = e1
            #e2 = torch.abs(w1_2).mean()
            #e3 = torch.abs(a2).mean()
            loss = e1
            loss.backward()
            optim.step()
            #for p in params:
            #    p.data -= lr * p.mem
            if nb_updates % 100 == 0:
                print('loss : %.3f %.3f %.3f' % (e1.data[0], e2.data[0], e3.data[0]))
                
                active = (hid.data>0).float().sum(1)
                print('nbActive : {:.4f} +- {:.4f}'.format(active.mean(), active.std()))
                im = Xrec.data.cpu().numpy()
                im = im.reshape(im.shape[0], c, h, w)
                im = grid_of_images_default(im, normalize=True)
                imsave('x.png', im)

                im = w1.data.cpu().numpy()
                im = im.reshape((c, h, w, z1)).transpose((3, 0, 1, 2))
                im = grid_of_images_default(im, normalize=True)
                imsave('w1.png', im)
                """
                im = wx_2.data.cpu().numpy()
                im = im.reshape((c, h, w, z2)).transpose((3, 0, 1, 2))
                im = grid_of_images_default(im, normalize=True)
                imsave('w2.png', im)
                """

            nb_updates += 1
Example #40
      THEANO_FLAGS='cuda.root=/Developer/NVIDIA/CUDA-7.5,device=gpu,floatX=float32' python train.py
    Output after 4 epochs on CPU: ~0.8146
    Time per epoch on CPU (Core i7): ~150s.
'''

max_features = 3
maxlen = 19200  # cut texts after this number of words (among top max_features most common words)
batch_size = 10
nb_epoch = 1
row_count = 120
column_count = 160
number_of_training_data = 455
DEBUG = False 

N_HIDDEN = 20
X_train, y_train, X_val, y_val, X_test, y_test = dt.load_dataset(number_of_training_data, row_count, column_count, max_features, DEBUG)
dt.saveImage(y_train[0], "gt.png", row_count, column_count,  3, True)

print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print("Pad sequences (samples x time)")
#X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
#X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)
print('Build model...')

Example #41
import data
import argparse
from model import EDSR
parser = argparse.ArgumentParser()
parser.add_argument("--dataset",default="images_register")
parser.add_argument("--imgsize",default=320,type=int)
parser.add_argument("--scale",default=2,type=int)
parser.add_argument("--layers",default=16,type=int)
parser.add_argument("--featuresize",default=128,type=int)
parser.add_argument("--batchsize",default=10,type=int)
parser.add_argument("--savedir",default='saved_models')
parser.add_argument("--iterations",default=400,type=int)
args = parser.parse_args()
data.load_dataset(args.dataset)
down_size = args.imgsize//args.scale
network = EDSR(down_size,args.layers,args.featuresize,args.scale, output_channels=1)
network.set_data_fn(data.get_batch,(args.batchsize,args.imgsize,down_size),data.get_test_set,(args.imgsize,down_size))
network.train(args.iterations,args.savedir)