Example #1
import random

import torch
from torch import optim

# SpatioTemporalModelNCF and NSNLLLoss are defined elsewhere in this project.


def train(dl, root_path, dataset, n_iter=500, iter_start=0, mod=0):
    # Fix the seeds for reproducibility.
    torch.manual_seed(0)
    random.seed(0)
    # dl = pickle.load(open(root_path + 'dl_' + dataset + '.pk', 'rb'))
    model = SpatioTemporalModelNCF(dl.nu,
                                   dl.nv,
                                   dl.nt,
                                   sampling_list=dl.sampling_list,
                                   vid_coor_rad=dl.vid_coor_rad,
                                   vid_pop=dl.vid_pop)
    # Resume from a saved checkpoint when iter_start is given.
    if iter_start != 0:
        model.load_state_dict(
            torch.load(root_path + f'model_ncf_{mod}_{iter_start}.md'))
    optimizer = optim.Adam(model.parameters())
    criterion = NSNLLLoss()
    uids = list(dl.uid_records.keys())  # list() so the ids can be shuffled
    for it in range(iter_start + 1, n_iter + 1):
        print_loss_total = 0
        random.shuffle(uids)
        for idx, uid in enumerate(uids):
            records_u = dl.uid_records[uid]
            optimizer.zero_grad()
            predicted_probs, _, _ = model(records_u, is_train=False, mod=mod)
            loss = criterion(predicted_probs)
            loss.backward()
            print_loss_total += loss.item()
            optimizer.step()
            if idx % 100 == 0:
                print(f'uid: \t{idx}\tloss: {print_loss_total:f}')
        print(it, print_loss_total)
        # Checkpoint every five iterations.
        if it % 5 == 0:
            torch.save(model.state_dict(),
                       root_path + f'model_ncf_{mod}_{it}.md')
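Every example on this page builds its loss with NSNLLLoss(), whose definition is not shown. Going only by the name (negative-sampling negative log-likelihood), a minimal sketch might look like the following; the tensor layout (the observed item's probability in column 0, sampled negatives after it) is an assumption for illustration, not the project's actual definition.

import torch
import torch.nn as nn


class NSNLLLossSketch(nn.Module):
    """Hypothetical negative-sampling NLL. Assumes probs has shape
    (steps, 1 + nb_cnt): column 0 is the positive item's probability,
    the remaining columns are sampled negatives."""

    def forward(self, probs):
        eps = 1e-10  # guard against log(0)
        pos = torch.log(probs[:, 0] + eps)
        neg = torch.log(1.0 - probs[:, 1:] + eps).sum(dim=1)
        return -(pos + neg).mean()

Under that assumed layout, the predicted_probs returned by the models above would be a (steps x (1 + nb_cnt)) tensor of probabilities.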
Example #2
import random

import torch
from torch import optim

# JNTM and NSNLLLoss are defined elsewhere in this project.


def train(dl, root_path, emb_dim=50, nb_cnt=100, n_iter=500, iter_start=0, mod=0):
    # Fix the seeds for reproducibility.
    torch.manual_seed(0)
    random.seed(0)
    # dl = pickle.load(open(root_path + 'dl.pk', 'rb'))
    model = JNTM(dl.nu, dl.nv, emb_dim, nb_cnt=nb_cnt,
                 sampling_list=dl.sampling_list, mod=mod)
    # Resume from a saved checkpoint when iter_start is given.
    if iter_start != 0:
        model.load_state_dict(
            torch.load(root_path + f'model_jntm_{mod}_{iter_start}.md'))
    optimizer = optim.Adam(model.parameters())
    criterion = NSNLLLoss()
    uids = list(dl.uid_records.keys())  # list() so the ids can be shuffled
    for it in range(iter_start + 1, n_iter + 1):
        print_loss_total = 0
        random.shuffle(uids)
        for idx, uid in enumerate(uids):
            records_u = dl.uid_records[uid]
            optimizer.zero_grad()
            # predicted_probs, _ = model(records_u, True)
            predicted_probs, _ = model(records_u, False)
            loss = criterion(predicted_probs)
            loss.backward()
            print_loss_total += loss.item()
            optimizer.step()
            if idx % 50 == 0:
                print(f'\t{idx}\t{print_loss_total:f}')
        print(it, print_loss_total)
        # Checkpoint after every iteration.
        print(root_path)  # debug output
        torch.save(model.state_dict(),
                   root_path + f'model_jntm_{mod}_{it}.md')
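The commented-out pickle line hints at how dl is produced before the call. A driver might look like the following; the file name dl.pk comes from that comment, while the path and hyperparameter values are purely illustrative.

import pickle

root_path = './data/'  # illustrative path
with open(root_path + 'dl.pk', 'rb') as f:
    dl = pickle.load(f)
train(dl, root_path, emb_dim=50, nb_cnt=100, n_iter=500)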
Example #3
import pickle
import random

import torch
from torch import optim

# AttentionModelNew and NSNLLLoss are defined elsewhere in this project.


def train(root_path, dataset, n_iter=500, iter_start=0, mod=0):
    # Fix the seeds for reproducibility.
    torch.manual_seed(0)
    random.seed(0)
    with open(root_path + 'dl_' + dataset + '.pk', 'rb') as f:
        dl = pickle.load(f)
    for _, records_u in dl.uid_records.items():
        records_u.summarize()
    model = AttentionModelNew(dl.nu, dl.nv, dl.nt,
                              sampling_list=dl.sampling_list,
                              vid_coor_nor=dl.vid_coor_nor,
                              vid_pop=dl.vid_pop,
                              mod=mod)
    # Resume from a saved checkpoint when iter_start is given.
    if iter_start != 0:
        model.load_state_dict(
            torch.load(root_path +
                       f'model_attention_nobias_new_{mod}_{iter_start}.md'))
    optimizer = optim.Adam(model.parameters())
    criterion = NSNLLLoss()
    uids = list(dl.uid_records.keys())  # list() so the ids can be shuffled
    for it in range(iter_start + 1, n_iter + 1):
        print_loss_total = 0
        random.shuffle(uids)
        for idx, uid in enumerate(uids):
            records_u = dl.uid_records[uid]
            optimizer.zero_grad()
            predicted_probs, _, _ = model(records_u, is_train=True)
            loss = criterion(predicted_probs)
            loss.backward()
            print_loss_total += loss.item()
            optimizer.step()
            if idx % 50 == 0:
                print(f'uid: \t{idx}\tloss: {print_loss_total:f}')
        print(it, print_loss_total)
        # Checkpoint after every iteration.
        torch.save(model.state_dict(),
                   root_path + f'model_attention_nobias_new_{mod}_{it}.md')
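Because a checkpoint is written every iteration under a predictable name, an interrupted run can be resumed by pointing iter_start at the last saved file. The path, dataset name, and iteration number below are illustrative.

# Resume from the checkpoint written at iteration 120.
train('./data/', 'foursquare', n_iter=500, iter_start=120, mod=0)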
Example #4
from torch import optim

# NSNLLLoss is defined elsewhere in this project. This __init__ belongs to a
# trainer class that wraps a model together with its training options.


def __init__(self, model, opt, model_type):
    self.opt = opt
    self.train_log_file = opt['train_log_file']
    self.n_epoch = opt['n_epoch']
    self.batch_size = opt['batch_size']
    self.model_type = model_type
    self.save_gap = opt['save_gap']
    self.model = model
    self.criterion = NSNLLLoss()
    # opt is the options dict, not torch.optim, so use optim.Adam here.
    self.optimizer = optim.Adam(self.model.parameters())
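The enclosing class is not shown. Assuming it were called, say, Trainer, construction would look like the following; every key and value in the opt dict is illustrative, chosen only to match the lookups in __init__.

opt = {
    'train_log_file': 'train.log',  # illustrative values
    'n_epoch': 500,
    'batch_size': 32,
    'save_gap': 5,
}
trainer = Trainer(model, opt, model_type='ncf')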
Example #5
import pickle
import random

import torch
from torch import optim

# SERM and NSNLLLoss are defined elsewhere in this project.


def train(root_path,
          emb_dim_all=50,
          emb_dim_v=50,
          emb_dim_t=50,
          emb_dim_w=50,
          nb_cnt=100,
          n_iter=500,
          iter_start=0,
          mod=0):
    with open(root_path + 'dl.pk', 'rb') as f:
        dl = pickle.load(f)
    model = SERM(dl.nu,
                 dl.nv,
                 dl.nt,
                 dl.nw,
                 emb_dim_all,
                 emb_dim_v,
                 emb_dim_t,
                 emb_dim_w,
                 nb_cnt=nb_cnt,
                 sampling_list=dl.sampling_list,
                 glove_path=root_path + 'glove.txt',
                 mod=mod)
    # Resume from a saved checkpoint when iter_start is given.
    if iter_start != 0:
        model.load_state_dict(
            torch.load(root_path + f'model_serm_{mod}_{iter_start}.md'))
    optimizer = optim.Adam(model.parameters())
    criterion = NSNLLLoss()
    uids = list(dl.uid_records.keys())  # list() so the ids can be shuffled
    for it in range(iter_start + 1, n_iter + 1):
        print_loss_total = 0
        random.shuffle(uids)
        for idx, uid in enumerate(uids):
            records_u = dl.uid_records[uid]
            optimizer.zero_grad()
            predicted_probs, _ = model(records_u, True)
            loss = criterion(predicted_probs)
            loss.backward()
            print_loss_total += loss.item()
            optimizer.step()
            if idx % 50 == 0:
                print(f'\t{idx}\t{print_loss_total:f}')
        print(it, print_loss_total)
        # Checkpoint every five iterations.
        if it % 5 == 0:
            torch.save(model.state_dict(),
                       root_path + f'model_serm_{mod}_{it}.md')
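Each example rebuilds its checkpoint path by hand, once for loading and once for saving, so the two can drift apart. A small helper, not part of the original code, would keep them in sync:

def checkpoint_path(root_path, name, *tags):
    """Hypothetical helper: build paths like root_path + 'model_serm_0_5.md'
    from a model name and a variable number of tag values."""
    return root_path + 'model_' + '_'.join(str(t) for t in (name, *tags)) + '.md'

# e.g. checkpoint_path('./data/', 'serm', 0, 5) -> './data/model_serm_0_5.md'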
Example #6
import pickle
import random

import torch
from torch import optim

# TimeAwareModel and NSNLLLoss are defined elsewhere in this project.


def train(root_path,
          emb_dim=50,
          nb_cnt=100,
          n_iter=500,
          iter_start=0,
          dropout=0.5,
          mod=0,
          mod_merge=0,
          dataset=None):
    # Fix the seeds for reproducibility.
    torch.manual_seed(0)
    random.seed(0)
    with open(root_path + 'dl_' + dataset + '.pk', 'rb') as f:
        dl = pickle.load(f)
    model = TimeAwareModel(dl.nu,
                           dl.nv,
                           emb_dim,
                           nb_cnt=nb_cnt,
                           sampling_list=dl.sampling_list,
                           vid_coor_rad=dl.vid_coor_rad,
                           dropout=dropout,
                           mod=mod,
                           mod_merge=mod_merge)
    # Resume from a saved checkpoint when iter_start is given.
    if iter_start != 0:
        model.load_state_dict(
            torch.load(root_path +
                       f'model_tam_{mod}_{mod_merge}_{iter_start}.md'))
    optimizer = optim.Adam(model.parameters())
    criterion = NSNLLLoss()
    uids = list(dl.uid_records.keys())
    for it in range(iter_start + 1, n_iter + 1):
        print_loss_total = 0
        # random.shuffle(uids)
        for idx, uid in enumerate(uids):
            records_u = dl.uid_records[uid]
            optimizer.zero_grad()
            predicted_probs, _, _ = model(records_u, True)
            loss = criterion(predicted_probs)
            loss.backward()
            print_loss_total += loss.item()
            optimizer.step()
            if idx % 100 == 0:
                print(f'uid: \t{idx}\tloss: {print_loss_total:f}')
        print(it, print_loss_total)
        # Checkpoint every five iterations.
        if it % 5 == 0:
            torch.save(model.state_dict(),
                       root_path + f'model_tam_{mod}_{mod_merge}_{it}.md')
Example #7
import pickle
import random

import torch
from torch import optim

# TimeAwareCF and NSNLLLoss are defined elsewhere in this project.


def train(root_path,
          dataset,
          adapt_bandwidth=True,
          bandwidth_global=0.07,
          adapt_nn=5,
          n_iter=500,
          iter_start=0):
    # Fix the seeds for reproducibility.
    torch.manual_seed(0)
    random.seed(0)
    with open(root_path + 'dl_' + dataset + '.pk', 'rb') as f:
        dl = pickle.load(f)
    for _, records_u in dl.uid_records.items():
        records_u.summarize()
    model = TimeAwareCF(dl.nv,
                        dl.nt,
                        vid_coor_nor=dl.vid_coor_nor,
                        vid_pop=dl.vid_pop,
                        adapt_bandwidth=adapt_bandwidth,
                        bandwidth_global=bandwidth_global,
                        adapt_nn=adapt_nn)
    # Resume from a saved checkpoint when iter_start is given.
    if iter_start != 0:
        model.load_state_dict(
            torch.load(root_path +
                       f'model_tcf_{adapt_bandwidth}_{bandwidth_global}_'
                       f'{adapt_nn}_{iter_start}.md'))
    optimizer = optim.Adam(model.parameters())
    criterion = NSNLLLoss()
    uids = list(dl.uid_records.keys())  # list() so the ids can be shuffled
    for it in range(iter_start + 1, n_iter + 1):
        print_loss_total = 0
        random.shuffle(uids)
        for idx, uid in enumerate(uids):
            records_u = dl.uid_records[uid]
            optimizer.zero_grad()
            predicted_probs, _, _ = model(records_u, is_train=True)
            loss = criterion(predicted_probs)
            loss.backward()
            print_loss_total += loss.item()
            optimizer.step()
            if idx % 10 == 0:
                print(f'uid: \t{idx}\tloss: {print_loss_total:f}')
        print(it, print_loss_total)
        # Checkpoint every five iterations.
        if it % 5 == 0:
            torch.save(model.state_dict(),
                       root_path + f'model_tcf_{adapt_bandwidth}_'
                       f'{bandwidth_global}_{adapt_nn}_{it}.md')
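The models take an is_train flag, so an evaluation pass can reuse the same per-user loop without gradient updates. A minimal sketch, assuming the record and model interfaces shown in Example #7; the helper itself is not part of the original code.

import torch


def evaluate(dl, model, criterion):
    """Hypothetical helper: average the loss over all users without
    updating any weights."""
    total = 0.0
    with torch.no_grad():
        for uid, records_u in dl.uid_records.items():
            predicted_probs, _, _ = model(records_u, is_train=False)
            total += criterion(predicted_probs).item()
    return total / max(len(dl.uid_records), 1)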