Exemple #1
0
def main(_):
	"""Entry point: load (or preprocess) the training data, build the model and train it.

	Args:
		_: unused positional argument (tf.app.run passes argv here).
	"""
	print("Parameters: ")
	# NOTE(review): FLAGS.__flags is a private TF attribute; on newer TF
	# versions the values are Flag objects, not raw values — confirm.
	for k, v in FLAGS.__flags.items():
		print("{} = {}".format(k, v))

	if not os.path.exists("./prepro/"):
		os.makedirs("./prepro/")

	if FLAGS.prepro:
		img_feat, tags_idx, a_tags_idx, vocab_processor = data_utils.load_train_data(
			FLAGS.train_dir, FLAGS.tag_path, FLAGS.prepro_dir, FLAGS.vocab)
	else:
		# Fix: use context managers so the pickle files are closed
		# deterministically instead of leaking the bare open() handles.
		with open(os.path.join(FLAGS.prepro_dir, "img_feat.dat"), 'rb') as f:
			img_feat = cPickle.load(f)
		with open(os.path.join(FLAGS.prepro_dir, "tag_ids.dat"), 'rb') as f:
			tags_idx = cPickle.load(f)
		with open(os.path.join(FLAGS.prepro_dir, "a_tag_ids.dat"), 'rb') as f:
			a_tags_idx = cPickle.load(f)
		vocab_processor = VocabularyProcessor.restore(FLAGS.vocab)
	# Rescale image features from [0, 255] to [-1, 1].
	img_feat = np.array(img_feat, dtype='float32') / 127.5 - 1.
	test_tags_idx = data_utils.load_test(FLAGS.test_path, vocab_processor)

	print("Image feature shape: {}".format(img_feat.shape))
	print("Tags index shape: {}".format(tags_idx.shape))
	print("Attribute Tags index shape: {}".format(a_tags_idx.shape))
	print("Vocab size: {}".format(len(vocab_processor._reverse_mapping)))
	print("Vocab max length: {}".format(vocab_processor.max_document_length))

	data = Data(img_feat, tags_idx, a_tags_idx, test_tags_idx, FLAGS.z_dim, vocab_processor)

	# Resolve the model class by name from this module (FLAGS.model holds the class name).
	Model = getattr(sys.modules[__name__], FLAGS.model)
	print(Model)

	model = Model(data, vocab_processor, FLAGS)

	model.build_model()

	model.train()
Exemple #2
0
def find_cmaes_settings(start_sigma) -> Tuple[Any, Any]:
    """Search for the best baseline image-segmentation settings with CMA-ES.

    Returns the ``(xopt, es)`` pair produced by ``cma.fmin2``.
    """
    images, labels = load_train_data()
    images = [img.astype(np.float32) for img in images]
    score_fn = partial(cmaes_metric_from_pool, photos=images, annots=labels)
    wrapped_score_fn = cmaes_utils.get_cmaes_params_warp(score_fn)
    initial_params = get_start_params()
    return cma.fmin2(wrapped_score_fn, initial_params, start_sigma)  # (xopt, es)
Exemple #3
0
def main(_):
    """Entry point: load (or preprocess) the training data, build the model and train it.

    Mirrors all console output into the ``outf`` log file via ``log_writeln``.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    print("Parameters: ")
    log_writeln(outf, "Parameters: ")
    # NOTE(review): FLAGS.__flags is a private TF attribute; on newer TF
    # versions the values are Flag objects, not raw values — confirm.
    for k, v in FLAGS.__flags.items():
        print("{} = {}".format(k, v))
        log_writeln(outf, "{} = {}".format(k, v))
    if not os.path.exists("./prepro/"):
        os.makedirs("./prepro/")

    if FLAGS.prepro:
        img_feat, tags_idx, a_tags_idx, vocab_processor = data_utils.load_train_data(
            FLAGS.train_dir, FLAGS.tag_path, FLAGS.prepro_dir, FLAGS.vocab)
    else:
        # Fix: use context managers so the pickle files are closed
        # deterministically instead of leaking the bare open() handles.
        with open(os.path.join(FLAGS.prepro_dir, "img_feat.dat"), 'rb') as f:
            img_feat = cPickle.load(f)
        with open(os.path.join(FLAGS.prepro_dir, "tag_ids.dat"), 'rb') as f:
            tags_idx = cPickle.load(f)
        with open(os.path.join(FLAGS.prepro_dir, "a_tag_ids.dat"), 'rb') as f:
            a_tags_idx = cPickle.load(f)
        vocab_processor = VocabularyProcessor.restore(FLAGS.vocab)
    # Rescale image features from [0, 255] to [-1, 1].
    img_feat = np.array(img_feat, dtype='float32') / 127.5 - 1.
    test_tags_idx = data_utils.load_test(FLAGS.test_path, vocab_processor)

    log_writeln(outf, "Image feature shape: {}".format(img_feat.shape))
    log_writeln(outf, "Tags index shape: {}".format(tags_idx.shape))
    log_writeln(outf,
                "Attribute Tags index shape: {}".format(a_tags_idx.shape))
    log_writeln(outf,
                "Vocab size: {}".format(len(vocab_processor._reverse_mapping)))
    log_writeln(
        outf,
        "Vocab max length: {}".format(vocab_processor.max_document_length))

    data = Data(img_feat, tags_idx, a_tags_idx, test_tags_idx, FLAGS.z_dim,
                vocab_processor)

    # Resolve the model class by name from this module (FLAGS.model holds the class name).
    Model = getattr(sys.modules[__name__], FLAGS.model)
    print(Model)
    log_writeln(outf, Model)
    model = Model(data, vocab_processor, FLAGS)

    model.build_model()

    model.train()
Exemple #4
0
def run(dataset, DF_layers, DI_layers, n_negs, alpha, gpu='0'):
    """Train and evaluate a J-NCF recommender on ``dataset``.

    Args:
        dataset: one of 'ml1m', 'ml100k', 'yelp', 'amusic', 'agames'.
        DF_layers: layer configuration of the feature (DF) sub-network.
        DI_layers: layer configuration of the interaction (DI) sub-network.
        n_negs: number of negative samples drawn per positive interaction.
        alpha: weight of the pair-wise loss; (1 - alpha) weights the
            point-wise loss.
        gpu: value assigned to CUDA_VISIBLE_DEVICES.

    Raises:
        ValueError: if ``dataset`` is not a known dataset name.

    Side effects: writes per-epoch metrics to
    ``output/<dataset>_J-NCF_<DF>_<DI>_n_<n_negs>.txt``.
    """
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    print("##### {} Negative Samples experiment on {} DF: {} DI: {}".format(
        n_negs, dataset, DF_layers, DI_layers))

    learning_rate = 0.0001
    batch_size = 256

    # Select GPU when available; FloatTensor aliases the matching tensor type.
    if torch.cuda.is_available():
        device = torch.device('cuda')
        FloatTensor = torch.cuda.FloatTensor
    else:
        device = torch.device('cpu')
        FloatTensor = torch.FloatTensor
    # Fixed seed for reproducibility.
    manualSeed = 706
    random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    print('CUDA Available:', torch.cuda.is_available())

    file_name = 'output/' + dataset + '_J-NCF_' + str(DF_layers) + '_' + str(
        DI_layers) + '_n_' + str(n_negs) + '.txt'

    # Datasets
    user_matrix, item_matrix, train_u, train_i, train_r, neg_candidates, u_cnt, user_rating_max = data_utils.load_train_data(
        dataset)
    # Fix: table-driven per-dataset (epochs, eval_batch_size) settings. An
    # unknown dataset now fails fast with ValueError instead of hitting a
    # NameError on undefined `epochs` further down.
    dataset_settings = {
        'ml1m': (100, 100 * 151),
        'ml100k': (100, 100 * 41),
        'yelp': (50, 100 * 81),
        'amusic': (100, 100 * 3),
        'agames': (100, 100 * 34),
    }
    if dataset not in dataset_settings:
        raise ValueError('Unknown dataset: {}'.format(dataset))
    epochs, eval_batch_size = dataset_settings[dataset]
    if dataset == 'ml1m':
        test_users, test_items = data_utils.load_test_ml1m()
    else:
        test_users, test_items = data_utils.load_test_data(dataset)

    n_users, n_items = user_matrix.shape[0], user_matrix.shape[1]

    user_array = user_matrix.toarray()
    item_array = item_matrix.toarray()

    # Model
    model = JNCF(DF_layers, DI_layers, n_users, n_items,
                 'concat').to(device)  # 'multi' or 'concat'
    pair_loss_function = TOP1  # TOP1 or BPR
    point_loss_function = torch.nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Fix: initialize all best-metric trackers so the final report cannot
    # raise NameError when HR never improves over 0.0.
    best_hr, best_ndcg, best_epoch = 0.0, 0.0, 0
    # Fix: `with` guarantees the log file is flushed/closed even on exceptions.
    with open(file_name, 'w') as output:
        for epoch in range(epochs):
            # Train
            model.train()  # Enable dropout (if have).

            idxlist = np.array(range(len(train_u)))
            np.random.shuffle(idxlist)
            epoch_loss, epoch_pair_loss, epoch_point_loss, epoch_i_point_loss, epoch_j_point_loss = .0, .0, .0, .0, .0

            start_time = time.time()
            for batch_idx, start_idx in enumerate(
                    range(0, len(idxlist), batch_size)):
                end_idx = min(start_idx + batch_size, len(idxlist))
                idx = idxlist[start_idx:end_idx]

                u_ids = train_u.take(idx)
                i_ids = train_i.take(idx)
                i_ratings = train_r.take(idx)

                users = FloatTensor(user_array.take(u_ids, axis=0))
                items = FloatTensor(item_array.take(i_ids, axis=0))
                labels = FloatTensor(i_ratings)

                rating_max = FloatTensor(user_rating_max.take(u_ids, axis=0))
                Y_ui = labels / rating_max  # for Normalized BCE
                Y_uj = torch.zeros_like(
                    Y_ui)  # for Negative samples point-wise loss

                # NOTE(review): zero_grad is called once per batch while
                # backward()/step() run once per negative sample below, so
                # gradients accumulate across negatives — confirm intended.
                optimizer.zero_grad()
                point_loss, pair_loss = 0., 0.

                # Negative Sampling
                neg_items_list = []
                for _ in range(0, n_negs):
                    neg_items = one_negative_sampling(u_ids, neg_candidates)
                    neg_items_list.append(neg_items)

                for ng_idx in range(0, n_negs):
                    neg_ids = neg_items_list[ng_idx]
                    items_j = FloatTensor(item_array.take(neg_ids, axis=0))

                    y_i, y_j = model(users, items, items_j)

                    i_point_loss = point_loss_function(y_i,
                                                       Y_ui)  # positive items i
                    j_point_loss = point_loss_function(y_j,
                                                       Y_uj)  # negative items j
                    point_loss = i_point_loss + j_point_loss
                    pair_loss = pair_loss_function(y_i, y_j, n_negs)

                    # Joint objective: pair-wise and point-wise losses
                    # blended by alpha.
                    loss = alpha * pair_loss + (1 - alpha) * point_loss

                    epoch_loss += loss.item()
                    epoch_pair_loss += pair_loss.item()
                    epoch_point_loss += point_loss.item()
                    epoch_i_point_loss += i_point_loss.item()
                    epoch_j_point_loss += j_point_loss.item()

                    loss.backward()
                    optimizer.step()
            train_time = time.time() - start_time

            # Evaluate
            model.eval()
            HR, NDCG = [], []

            time_E = time.time()
            for start_idx in range(0, len(test_users), eval_batch_size):
                end_idx = min(start_idx + eval_batch_size, len(test_users))
                u_ids = test_users[start_idx:end_idx]
                i_ids = test_items[start_idx:end_idx]

                users = FloatTensor(user_array.take(u_ids, axis=0))
                items = FloatTensor(item_array.take(i_ids, axis=0))

                preds, _ = model(users, items, items)

                # Each test user owns a contiguous group of 100 candidate
                # items; score them chunk by chunk.
                e_batch_size = eval_batch_size // 100  # faster eval
                preds = torch.chunk(preds.detach().cpu(), e_batch_size)
                chunked_items = torch.chunk(torch.IntTensor(i_ids), e_batch_size)

                for i, pred in enumerate(preds):
                    _, indices = torch.topk(pred, 10)
                    recommends = torch.take(chunked_items[i],
                                            indices).numpy().tolist()

                    # By convention the first candidate is the ground-truth item.
                    gt_item = chunked_items[i][0].item()
                    HR.append(hit(gt_item, recommends))
                    NDCG.append(ndcg(gt_item, recommends))

            eval_time = time.time() - time_E
            e_loss = epoch_loss / (batch_idx + 1)
            e_pair = epoch_pair_loss / (batch_idx + 1)
            e_point = epoch_point_loss / (batch_idx + 1)
            e_i_point = epoch_i_point_loss / (batch_idx + 1)
            e_j_point = epoch_j_point_loss / (batch_idx + 1)
            text_1 = '[Epoch {:03d}]'.format(epoch) + '\ttrain: ' + time.strftime(
                '%M: %S',
                time.gmtime(train_time)) + '\tHR: {:.4f}\tNDCG: {:.4f}\n'.format(
                    np.mean(HR), np.mean(NDCG))
            text_2 = 'Loss: {:.6f}\tPair: {:.4f}\tPoint: {:.4f}\ti_point: {:.4f}\tj_point: {:.4f}\n'.format(
                e_loss, e_pair, e_point, e_i_point, e_j_point)
            print(text_1[:-1])
            print(text_2[:-1])
            output.write(text_1)
            output.write(text_2)

            if np.mean(HR) > best_hr:
                best_hr, best_ndcg, best_epoch = np.mean(HR), np.mean(NDCG), epoch
        result = 'DF: {} DI: {}. Best epoch {:02d}: HR = {:.4f}, NDCG = {:.4f}\n'.format(
            DF_layers, DI_layers, best_epoch, best_hr, best_ndcg)
        print(result[:-1])
        output.write(result)
Exemple #5
0
        'train_img_dir' : args.train_img_dir
    }



if not os.path.exists("./prepro/"):
    os.makedirs("./prepro/")
# Either restore the preprocessed artefacts from disk or rebuild them from
# the raw training data.
if args.pre_parameter:  # idiom fix: truthiness instead of `== True`
    # Fix: use context managers so the pickle files are closed
    # deterministically instead of leaking the bare open() handles.
    with open(os.path.join(args.prepro_dir, "img_feat.dat"), 'rb') as f:
        img_feat = cPickle.load(f)
    with open(os.path.join(args.prepro_dir, "tag_ids.dat"), 'rb') as f:
        tags_idx = cPickle.load(f)
    with open(os.path.join(args.prepro_dir, "a_tag_ids.dat"), 'rb') as f:
        a_tags_idx = cPickle.load(f)
    with open(os.path.join(args.prepro_dir, "k_tmp_vocab_ids.dat"), 'rb') as f:
        k_tmp_vocab = cPickle.load(f)
    vocab_processor = Vocab_Operator.restore(args.vocab)
else:
    img_feat, tags_idx, a_tags_idx, vocab_processor, k_tmp_vocab = data_utils.load_train_data(
        args.train_dir, args.tag_path, args.prepro_dir, args.vocab)

# Rescale image features from [0, 255] to [-1, 1].
img_feat = np.array(img_feat, dtype='float32') / 127.5 - 1.
test_tags_idx = data_utils.load_test(args.test_path, vocab_processor, k_tmp_vocab)

print("Image feature shape: {}".format(img_feat.shape))
print("Tags index shape: {}".format(tags_idx.shape))
print("Attribute Tags index shape: {}".format(a_tags_idx.shape))
print("Test Tags index shape: {}".format(test_tags_idx.shape))

data = Data(img_feat, tags_idx, a_tags_idx, test_tags_idx, args.z_dim, vocab_processor)

dcgan = dcgan.DCGAN(model_options, training_options, data, args.mode, args.resume, args.model_dir)
Exemple #6
0
#embed_dim = 256
#factor_dim = 64

# Select GPU when available; FloatTensor aliases the matching tensor type
# so later tensor construction picks the right device implicitly.
if torch.cuda.is_available():
    device = torch.device('cuda')
    FloatTensor = torch.cuda.FloatTensor
else:
    device = torch.device('cpu')
    FloatTensor = torch.FloatTensor
# Fixed seed for reproducible shuffling/initialisation.
manualSeed = 706
random.seed(manualSeed)
torch.manual_seed(manualSeed)
print('CUDA Available:', torch.cuda.is_available())

# Prepare data
user_matrix, item_matrix, train_u, train_i, train_r, neg_candidates, u_cnt, user_rating_max = data_utils.load_train_data(
    args.data)
test_users, test_items = data_utils.load_test_ml1m()
# assumes 100 ranked candidate items per test user and 151 users per eval
# batch — TODO confirm against the evaluation loop.
eval_batch_size = 100 * 151
n_users, n_items = user_matrix.shape[0], user_matrix.shape[1]

# Densify the sparse interaction matrices for row-wise take() indexing.
user_array = user_matrix.toarray()
item_array = item_matrix.toarray()
user_idxlist, item_idxlist = list(range(n_users)), list(range(n_items))

# Model
model = JNCF(DF_layers, DI_layers, n_users, n_items,
             'concat').to(device)  # 'multi' or 'concat'

# Optimize
pair_loss_function = TOP1
point_loss_function = torch.nn.BCEWithLogitsLoss()
Exemple #7
0
import helpers

##############################################################################
# Settings
##############################################################################
CUDA = False  # run on CPU

##############################################################################
# Load the dataset
##############################################################################
# Lightweight record bundling a corpus with its data splits and embeddings.
Data = namedtuple("Data", "corpus train dev test embeddings word_to_index")

# AskUbuntu: full corpus with train/dev/test splits.
data_utils.download_ask_ubuntu_dataset()
EMBEDDINGS, WORD_TO_INDEX = data_utils.load_part2_embeddings()
ASK_UBUNTU_CORPUS = data_utils.load_corpus(WORD_TO_INDEX)
ASK_UBUNTU_TRAIN_DATA = data_utils.load_train_data()
ASK_UBUNTU_DEV_DATA, ASK_UBUNTU_TEST_DATA = data_utils.load_eval_data()
ASK_UBUNTU_DATA = Data(ASK_UBUNTU_CORPUS, ASK_UBUNTU_TRAIN_DATA,\
                        ASK_UBUNTU_DEV_DATA, ASK_UBUNTU_TEST_DATA,\
                        EMBEDDINGS, WORD_TO_INDEX)

# Android: evaluation-only dataset, hence train=None.
data_utils.download_android_dataset()
ANDROID_CORPUS = data_utils.load_android_corpus(WORD_TO_INDEX)
ANDROID_DEV_DATA, ANDROID_TEST_DATA = data_utils.load_android_eval_data()
ANDROID_DATA = Data(ANDROID_CORPUS, None,\
                      ANDROID_DEV_DATA, ANDROID_TEST_DATA,\
                      EMBEDDINGS, WORD_TO_INDEX)

##############################################################################
# Train and evaluate a baseline TFIDF model
##############################################################################
Exemple #8
0
import helpers

##############################################################################
# Settings
##############################################################################
CUDA = False  # run on CPU

##############################################################################
# Load the dataset
##############################################################################
# Lightweight record bundling the corpus with its data splits and embeddings.
Data = namedtuple("Data", \
        "corpus train dev test embeddings word_to_index")
data_utils.download_ask_ubuntu_dataset()
EMBEDDINGS, WORD_TO_INDEX = data_utils.load_embeddings()
CORPUS = data_utils.load_corpus(WORD_TO_INDEX)
TRAIN_DATA = data_utils.load_train_data()
DEV_DATA, TEST_DATA = data_utils.load_eval_data()
DATA = Data(CORPUS, TRAIN_DATA, DEV_DATA, TEST_DATA,\
            EMBEDDINGS, WORD_TO_INDEX)

##############################################################################
# Train and evaluate the models for Part 1
##############################################################################
RESULTS = []
# Hyper-parameter grid (single-value lists: one configuration per list).
MARGINS = [0.2]
MAX_EPOCHS = 50
BATCH_SIZE = 32
FILTER_WIDTHS = [3]
POOL_METHOD = "average"
FEATURE_DIMS = [600]
DROPOUT_PS = [0.3]
Exemple #9
0
        model = Dave_orig()
        save_model_name = './Model1.h5'
    elif model_name == '2':
        # K.set_learning_phase(1)
        model = Dave_norminit()
        save_model_name = './Model2.h5'
    elif model_name == '3':
        # K.set_learning_phase(1)
        model = Dave_dropout()
        save_model_name = './Model3.h5'
    else:
        # NOTE(review): this branch only prints a warning and falls through,
        # leaving `model` unbound — fit_generator below would then raise
        # NameError. Confirm whether an early return/raise was intended.
        print(bcolors.FAIL + 'invalid model name, must one of 1, 2 or 3' +
              bcolors.ENDC)

    # the data, shuffled and split between train and test sets
    train_generator, samples_per_epoch = load_train_data(batch_size=batch_size,
                                                         shape=(100, 100))

    # training
    model.fit_generator(train_generator,
                        steps_per_epoch=math.ceil(samples_per_epoch * 1. /
                                                  batch_size),
                        epochs=nb_epoch,
                        workers=8,
                        use_multiprocessing=True)
    print(bcolors.OKGREEN + 'Model trained' + bcolors.ENDC)

    # evaluation
    K.set_learning_phase(0)
    test_generator, samples_per_epoch = load_test_data(batch_size=batch_size,
                                                       shape=(100, 100))
    model.evaluate_generator(test_generator,
Exemple #10
0
    _, _, _, submission_file = create_filenames(train_id)
    # Make the submission: expit converts raw predictions to probabilities.
    dsb.save_submission(dataset.ids, expit(predictions), test_img_sizes,
                        submission_file)


if __name__ == "__main__":
    args = parser.parse_args()
    # Load the train_config
    train_config = load_train_setup(args.train_id)
    trained_model = None
    PLOT = args.plot
    if args.train:
        # Load the train data
        train_ids, x_train, y_train = dsb.load_train_data(
            path_to_train="../input/train/",
            img_size=train_config["img_size"],
            num_channels=3)
        train_dataset = NpDataset(x=x_train, y=y_train, ids=train_ids)
        # train the models (only k-fold cross-validated training is supported)
        if not train_config["kfold"]:
            raise NotImplementedError("Non-kfold training is not implemented")
        # num_completed allows resuming a partially finished k-fold run.
        trained_model = kfold(train_dataset,
                              train_config,
                              args.train_id,
                              num_completed=args.num_completed)

    if args.test:
        # Load the test data
        test_ids, x_test, sizes_test = dsb.load_test_data(
            path_to_test="../input/test/",
            img_size=train_config["img_size"],
Exemple #11
0
def main():
    """
    Main function for training an image classifier on a directory
    containing sub-folders train, valid, test with images.
    Inputs are passed using the command line.
    Type python train.py -h for help on usage.

    Returns True (early) when argument validation asks not to proceed.
    """
    # Parse and validate the cmdline arguments passed for training.
    args, proceed = parse_args_train()

    if proceed:
        print('args in main:', args)
    else:
        print("\n Type python train.py -h for help on setting training "
                "parameters. Thank you! \n")
        return True

    # Load the training data.
    dataloaders, class_to_idx = load_train_data(args.data_directory,
                                                args.batch_sizes)

    # Build network: hidden layers plus the final classification layer.
    layers = args.hidden_units + [args.num_classes]
    model = build_network(arch_name=args.arch, layers=layers,
                          dropout=args.dropout,
                          hidden_activation=args.hidden_activation,
                          output_activation=args.output_activation)

    # Pick the device; --gpu without available CUDA is a hard error.
    if args.gpu and torch.cuda.is_available():
        device = torch.device("cuda")
    elif not args.gpu:
        device = torch.device("cpu")
    else:
        print("cuda is available? :", torch.cuda.is_available())
        # Fix: raise a specific exception type instead of bare Exception
        # (RuntimeError is a subclass, so callers catching Exception still work).
        raise RuntimeError('Error ! device CUDA is not available')

    # Load the model from checkpoint; the checkpoint carries losses and
    # model params so training can restart where it stopped.
    with active_session():
        if args.checkpoint:
            model, checkpoint = load_checkpoint(args.checkpoint)
        else:
            checkpoint = {}

        trained_model, checkpoint = train_network(model, device,
                                              criteria=args.criterion,
                                              optimizr=args.optimizer,
                                              learn_rate=args.learning_rate,
                                              trainloader=dataloaders['train'],
                                              validloader=dataloaders['valid'],
                                              epochs=args.epochs,
                                              checkpoint=checkpoint)
        if args.save_dir:
            checkpoint['class_to_idx'] = class_to_idx
            # Fix: build the file name in one expression instead of four
            # piecewise += concatenations.
            checkpoint_filename = 'checkpoint{}.pth'.format(
                checkpoint['last_epoch'])
            checkpoint_filepath = os.path.join(args.save_dir, checkpoint_filename)
            checkpoint_model(checkpoint, checkpoint_filepath)
# Load the clusters (npz archives keyed by cluster name, e.g. "cluster_0").
train_cluster_ids = np.load("../clusters/train_clusters.npz")
test_cluster_ids = np.load("../clusters/test_clusters.npz")

# Vectorized membership test: in_set(ids, s)[i] is True when ids[i] is in s.
in_set = np.vectorize(lambda a, s: a in s)

# In[5]:

# Load the training data
test_ids, X_test, sizes_test = dsb.load_test_data(path_to_test=PATH_TO_TEST,
                                                  img_size=None,
                                                  num_channels=NUM_CHANNELS,
                                                  mode='rgb')
train_ids, X_train, Y_train = dsb.load_train_data(path_to_train=PATH_TO_TRAIN,
                                                  img_size=None,
                                                  num_channels=NUM_CHANNELS,
                                                  mode='rgb')
print("Number of training samples: %s" % len(train_ids))
print("X-train shape: {}".format(X_train.shape))
print("Y-train shape: {}".format(Y_train.shape))
print("X-test shape: {}".format(X_test.shape))

# Get indexes from clusters: map each sample id to its cluster index.
# Samples in cluster_0 keep the default 0; cluster_1 members get 1.
train_clusters = np.zeros(NUM_TRAIN, dtype=int)
test_clusters = np.zeros(NUM_TEST, dtype=int)
train_clusters[in_set(train_ids,
                      {a
                       for a in train_cluster_ids["cluster_0"]})] = 0
train_clusters[in_set(train_ids,
                      {a
                       for a in train_cluster_ids["cluster_1"]})] = 1