Example #1
if __name__ == '__main__':
    torch.cuda.set_device(6)
    i_genre = np.load('/home2/zh/data/ml-1m/i_genre.npy')
    i_other = np.load('/home2/zh/data/ml-1m/i_other.npy')
    ui = np.load('/home2/zh/data/ml-1m/ui.npy')
    x_genre = np.load('/home2/zh/data/ml-1m/x_genre.npy')
    x_other = np.load('/home2/zh/data/ml-1m/x_other.npy')
    y = np.load('/home2/zh/data/ml-1m/y.npy')
    config = {'num_embeddings': 3529,
              'num_ctns': 1,
              'fieldnum': 7,
              'embedding_dim': 32,
              'headnum': 8,
              'attention_dim': 32,
              'seed': 81192}
    setup_seed(config['seed'])
    model = AutoInt(config).cuda().double()
    user_set = np.array(list(set(ui[:,0])))
    usernum = len(user_set)
    # user-level 80/10/10 split (cumulative cutoffs); only the training users are selected below
    perm = np.random.permutation(usernum)
    train_usernum = int(0.8 * usernum)
    valid_usernum = int(0.9 * usernum)
    idx0 = np.where(np.isin(ui[:, 0], user_set[perm[:train_usernum]]))[0]
    # subsample 3200 interactions from the training users
    idx = idx0[np.random.permutation(idx0.shape[0])[:3200]]
    # categorical (one-hot) fields, the multi-hot genre field, and the single continuous field
    onehot_i = torch.tensor(i_other[idx, :-1]).cuda()
    onehot_x = torch.tensor(x_other[idx, :-1]).cuda()
    multihot_i = torch.tensor(i_genre[idx]).cuda()
    multihot_x = torch.tensor(x_genre[idx]).cuda()
    multihot_list = [(multihot_i, multihot_x)]
    ctns = torch.tensor(x_other[idx, -1:]).cuda()
    # label = torch.tensor(y[idx]).cuda().double()
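Examples #1, #3 and #4 call a setup_seed helper (example #2 calls util.setup_seed) that is not shown in any of the snippets. The sketch below is a typical implementation, assuming the helper seeds Python's random module, NumPy and PyTorch (CPU and CUDA) and puts cuDNN into deterministic mode; the exact body in these projects may differ.

import random
import numpy as np
import torch

def setup_seed(seed):
    # seed every common RNG source so runs are reproducible
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False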
Example #2
    print(f'embedding matrix shape: {pretrained_embeddings.shape}')
    print(f'relatedness matrix shape: {related_embeddings.shape}')

    return train_dl, valid_dl, pretrained_embeddings, related_embeddings


def get_res(config, train_dl, valid_dl, pretrained_embeddings, related_embeddings):
    """Train an LSTMClassifier on the given loaders and return its top-5 and top-1 results."""
    model = LSTMClassifier(config, pretrained_embeddings, related_embeddings)
    model.cuda()
    top5, top1 = util.train_model(model, train_dl, valid_dl, config)
    del model  # release the model so GPU memory is freed between runs
    return top5, top1


if __name__ == "__main__":
    util.setup_seed(6)
    parser = argparse.ArgumentParser(description='Knowledge in Labels Project')
    parser.add_argument('-d', '--data', help='data name', default='imdb',
                        choices=['agnews', 'imdb', 'newsgroup'])
    parser.add_argument('-g', '--gpu', help='gpu id', type=int, default=0)
    args = parser.parse_args()

    with open('settings.json', 'r', encoding='utf-8') as f:
        settings = json.load(f)
    config = settings["lstm"][args.data]
    config["epochs"] = 20
    config["embed_dropout_prob"] = 0.2
    config["vocab_size"] = None
    config["data_name"] = args.data
    config["embed_dim"] = 300
    torch.cuda.set_device(args.gpu)
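The snippet ends right after the GPU is selected. Below is a hedged sketch of how the pieces above would typically be wired together; prepare_data is a hypothetical stand-in for the unnamed function whose return statement appears at the top of this example, and the print format is illustrative only.

    # hypothetical continuation; the actual call is truncated in the snippet
    train_dl, valid_dl, pretrained_embeddings, related_embeddings = prepare_data(config)
    top5, top1 = get_res(config, train_dl, valid_dl,
                         pretrained_embeddings, related_embeddings)
    print(f'top5: {top5}, top1: {top1}')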
Example #3
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.gpu_id != '-1':
        cfg.GPU_ID = args.gpu_id
    else:
        cfg.CUDA = False

    if args.data_dir != '':
        cfg.DATA_DIR = args.data_dir

    print('Using config')
    pprint.pprint(cfg)

    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    setup_seed(args.manualSeed)

    now = datetime.datetime.now(dateutil.tz.tzlocal())
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
    output_dir = f'./output/{cfg.Data.dataset}_{cfg.config_name}_{timestamp}'

    num_gpu = len(cfg.GPU_ID.split(','))
    data = Data()
    dataset = (data.trainloader, data.validloader, data.testloader)

    algo = Trainer(output_dir, dataset)
    start_t = time.time()
    algo.train()

    end_t = time.time()
    print(f'Total time for training: {end_t - start_t:.2f}s')
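The cfg / cfg_from_file pair used above comes from a config module that is not part of the snippet. A minimal sketch of the usual pattern is shown below, assuming a YAML file merged into a global EasyDict; the default attribute names and the _merge helper are assumptions, not the project's actual code.

# hypothetical config.py sketch
import yaml
from easydict import EasyDict as edict

__C = edict()
cfg = __C            # imported elsewhere as `from config import cfg`
cfg.CUDA = True
cfg.GPU_ID = '0'
cfg.DATA_DIR = ''

def _merge(src, dst):
    # recursively copy options from src into the global EasyDict
    for k, v in src.items():
        if isinstance(v, dict) and isinstance(dst.get(k), dict):
            _merge(v, dst[k])
        else:
            dst[k] = v

def cfg_from_file(filename):
    with open(filename, 'r', encoding='utf-8') as f:
        _merge(yaml.safe_load(f), __C)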
Example #4
File: train.py  Project: momogasuki/akie
        with torch.no_grad():
            pred, loss = model(task_data, label, spt_idx, qry_idx, phase)
        preds.append(pred)
        losses.append(loss)
        labels.append(label[qry_idx])
    auc = roc_auc_score(torch.cat(labels).detach().cpu().numpy(),
                        torch.cat(preds).detach().cpu().numpy())
    loss = torch.stack(losses).mean().item()
    model.train()
    return auc, loss


if __name__ == '__main__':
    args = parse_args()
    torch.cuda.set_device(args.gpunum)
    setup_seed(args.seed)
    dataloader = MyDataLoader(args)
    model = MetaLearner(args).cuda().double()
    # two parameter groups: the inner network and the remaining meta-parameters,
    # each with its own learning rate
    net_params = list(model.net.parameters())
    meta_params = list(model.parameters())[len(net_params):]
    optimizer = optim.SGD([
        {'params': net_params, 'lr': args.learning_rate[0]},
        {'params': meta_params, 'lr': args.learning_rate[1]},
    ])
    traucs, vaaucs, trloss, valoss = [], [], [], []    # per-epoch train/valid AUC and loss histories
    labels_chkp, preds_chkp, losses_chkp = [], [], []  # bookkeeping for the best checkpoint
    maxauc = 0
    test_auc, test_loss = 0, 0
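The snippet ends before the training loop itself. The following is a purely hypothetical sketch (not the project's code) of how the trackers above are commonly used, assuming helpers named train_one_epoch and evaluate and an args.epochs attribute, none of which appear in the snippet.

    # hypothetical training-loop skeleton: keep the best validation AUC
    for epoch in range(args.epochs):
        train_one_epoch(model, dataloader, optimizer)
        tr_auc, tr_loss = evaluate(model, dataloader, 'train')
        va_auc, va_loss = evaluate(model, dataloader, 'valid')
        traucs.append(tr_auc)
        vaaucs.append(va_auc)
        trloss.append(tr_loss)
        valoss.append(va_loss)
        if va_auc > maxauc:
            maxauc = va_auc
            test_auc, test_loss = evaluate(model, dataloader, 'test')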