def main(args):
    """Train the CVAE + Regressor pair on the WBC dataset.

    Builds a frozen MnasNet feature extractor, optionally resumes both
    trainable models from checkpoints, loads 250 shuffled metadata rows,
    and hands everything to ``train``.
    """
    # Pretrained backbone used as a frozen feature extractor.
    feature_extractor = models.mnasnet1_0(pretrained=True).to(device).eval()
    cvae = CVAE(1000, 128, args.n_class * 2, args.n_class).to(device)
    cvae.encoder.eval()
    regressor = Regressor().to(device)

    # Resume from checkpoints when the files exist on disk.
    cvae_ckpt = Path(args.cvae_resume_model)
    if cvae_ckpt.exists():
        print("load cvae model:", args.cvae_resume_model)
        cvae.load_state_dict(torch.load(args.cvae_resume_model))

    regressor_ckpt = Path(args.regressor_resume_model)
    if regressor_ckpt.exists():
        print("load regressor model:", args.regressor_resume_model)
        regressor.load_state_dict(torch.load(args.regressor_resume_model))

    # Deterministic shuffle (fixed seed), then keep the first 250 rows.
    metadata_path = Path(args.data_root,
                         args.metadata_file_name.format(args.subset))
    image_label = pandas.read_csv(metadata_path).sample(
        frac=1, random_state=551)[:250]
    # CSV labels are 1-based; shift to 0-based class ids.
    image_label["class"] = image_label["class"] - 1

    dataset = WBCDataset(args.n_class,
                         image_label[:250].values,
                         args.data_root,
                         subset=args.subset,
                         train=True)
    data_loader = loader(dataset, args.batch_size, True)

    cvae_optimizer = RAdam(cvae.parameters(), weight_decay=1e-3)
    regressor_optimizer = RAdam(regressor.parameters(), weight_decay=1e-3)
    train(args, feature_extractor, cvae, regressor, cvae_optimizer,
          regressor_optimizer, data_loader)
def main(args):
    """Train a single CVAE on the WBC dataset with a MnasNet backbone."""
    # Pretrained backbone, kept in eval mode.
    backbone = models.mnasnet1_0(pretrained=True).to(device).eval()
    model = CVAE(1000, 128, 128, args.n_class, 128).to(device)

    # Deterministic shuffle (fixed seed), then truncate to 250 samples.
    csv_path = Path(args.data_root,
                    args.metadata_file_name.format(args.subset))
    image_label = pandas.read_csv(csv_path).sample(
        frac=1, random_state=551)[:250]
    # CSV labels are 1-based; shift to 0-based class ids.
    image_label["class"] = image_label["class"] - 1
    dataset = WBCDataset(image_label.values, args.data_root, subset=args.subset)

    data_loader = loader(dataset, args.batch_size, True)
    optimizer = RAdam(model.parameters(), weight_decay=1e-3)
    train(args, backbone, model, optimizer, data_loader)
# Exemple #3
# 0
# NOTE(review): the two lines above are scrape/pagination markers from the
# snippet source, not Python — kept as comments so the file remains readable.
    # NOTE(review): fragment — the enclosing function's `def` line is not
    # visible here. `model_config`, the interaction dicts, the dataloaders,
    # `args`, `device`, `CVAE`, and `train` all come from the missing outer
    # scope; confirm against the original file before editing.
    # the parameter name suggest what to evaluate on
    model_config[
        "test_user_item_interaction_dict"] = val_user_item_interaction_dict
    model_config[
        "train_user_item_interaction_dict"] = train_user_item_interaction_dict

    ##### define the model #####
    if args.model_type == "cvae":
        model = CVAE(config=model_config).to(device)
        print(model)
        #criterion = torch.nn.MSELoss()
        criterion = torch.nn.CrossEntropyLoss()
        # size_average is set to False, the losses are instead summed for each minibatch
        #criterion.size_average = False
        learning_rate = 1e-4
        # Adam with a small L2 penalty; lr is hard-coded rather than taken
        # from args — presumably intentional, but worth confirming.
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=learning_rate,
                                     weight_decay=1e-5)

    # train the model
    # NOTE(review): if args.model_type != "cvae", `model`/`criterion`/
    # `optimizer` are undefined here and this call would raise NameError.
    if args.exp_type == "train":
        train.train_and_val(model, train_dataloader, val_dataloader, \
                      criterion, optimizer, args, model_config)

    if args.exp_type == "evaluate":
        # NOTE(review): this span is garbled source. The `eval_config` dict
        # literal is truncated mid-entry — the "exp_save_models_dir" key has
        # no value — and the text then fuses into the tail of an unrelated
        # dataloaders dict ('test': DataLoader(...)). Two different snippets
        # appear to have been concatenated here; this span does not parse
        # as-is and must be reconstructed from the original sources.
        eval_config = {
            "num_items": num_items,
            "train_user_item_interaction_dict":
            train_user_item_interaction_dict,
            "test_user_item_interaction_dict": test_user_item_interaction_dict,
            "exp_save_models_dir":
        'test':
        torch.utils.data.DataLoader(test_dataset,
                                    batch_size=opts.batch_size,
                                    shuffle=False)
    }

    # NOTE(review): fragment — the enclosing function's `def` line is not
    # visible and the body continues past this view. `opts`, `device`,
    # `CVAE`, `Discriminator`, `Classifier`, and `CLASSIFIERS` come from the
    # missing outer scope.
    cvae = CVAE(opts.latent_size, device).to(device)
    dis = Discriminator().to(device)
    classifier = Classifier(opts.latent_size).to(device)
    # NOTE(review): `classer` is built but never printed and gets no
    # optimizer below — possibly unused; verify in the full file.
    classer = CLASSIFIERS().to(device)

    print(cvae)
    print(dis)
    print(classifier)

    # Three separate Adam optimizers sharing the same lr/betas/weight decay.
    optimizer_cvae = torch.optim.Adam(cvae.parameters(),
                                      lr=opts.lr,
                                      betas=(opts.b1, opts.b2),
                                      weight_decay=opts.weight_decay)
    optimizer_dis = torch.optim.Adam(dis.parameters(),
                                     lr=opts.lr,
                                     betas=(opts.b1, opts.b2),
                                     weight_decay=opts.weight_decay)
    optimizer_classifier = torch.optim.Adam(classifier.parameters(),
                                            lr=opts.lr,
                                            betas=(opts.b1, opts.b2),
                                            weight_decay=opts.weight_decay)

    # Probe ./ex/1, ./ex/2, ... for the first index with no existing
    # directory (next free experiment slot).
    i = 1
    while os.path.isdir('./ex/' + str(i)):
        i += 1