def eval(model, dev_loader, encoder, gpu_mode):
    """Score the model on the dev set with top-1 and top-5 scorers.

    NOTE: the name shadows the builtin ``eval``; kept unchanged so existing
    callers in this module keep working.

    :param model: network exposing ``forward_eval_rolebeam(img)``
    :param dev_loader: iterable of ``(img, verb, roles, labels)`` batches
    :param encoder: dataset encoder handed to ``imsitu_scorer``
    :param gpu_mode: ``>= 0`` moves tensors to CUDA, otherwise CPU
    :return: ``(top1 scorer, top5 scorer, 0)`` — loss computation is
        disabled (see the commented-out code the original carried).
    """
    model.eval()
    val_loss = 0

    print('evaluating model...')
    top1 = imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer(encoder, 5, 3)
    with torch.no_grad():
        mx = len(dev_loader)
        for i, (img, verb, roles, labels) in enumerate(dev_loader):
            # Carriage-return progress line (the original used the
            # Python-2 trailing-comma idiom, which is a no-op in Python 3).
            print("{}/{} batches".format(i + 1, mx), end='\r')

            # torch.autograd.Variable has been a no-op since torch 0.4;
            # moving the tensors to the right device is all that is needed.
            if gpu_mode >= 0:
                img = img.cuda()
                roles = roles.cuda()
                verb = verb.cuda()
                labels = labels.cuda()

            verb_predict_ids, role_predict_ids = model.forward_eval_rolebeam(
                img)
            top1.add_point_eval_rolebeam(verb_predict_ids, verb,
                                         role_predict_ids, labels)
            top5.add_point_eval_rolebeam(verb_predict_ids, verb,
                                         role_predict_ids, labels)

            # Free batch tensors eagerly to keep GPU memory flat.
            del verb_predict_ids, role_predict_ids, img, verb, roles, labels
            # BUG FIX: the original body ended with a bare `break`
            # (debug leftover), so only the first batch was ever scored.
            # Removed so the whole dev set is evaluated.

    return top1, top5, 0
def train(model,
          train_loader,
          dev_loader,
          traindev_loader,
          optimizer,
          scheduler,
          max_epoch,
          model_dir,
          encoder,
          gpu_mode,
          clip_norm,
          lr_max,
          model_name,
          args,
          eval_frequency=4000):
    """Train the joint verb+role model, evaluating on dev every
    ``eval_frequency`` steps and saving whenever the dev average improves.

    NOTE(review): this module defines ``train`` several times; only the
    last definition is visible at import time — confirm which one callers
    actually use.

    :param eval_frequency: number of optimizer steps between dev evals
    """
    model.train()
    train_loss = 0
    total_steps = 0
    print_freq = 400
    dev_score_list = []
    time_all = time.time()
    '''if model.gpu_mode >= 0 :
        ngpus = 2
        device_array = [i for i in range(0,ngpus)]

        pmodel = torch.nn.DataParallel(model, device_ids=device_array)
    else:
        pmodel = model'''
    # DataParallel path is disabled; train on a single device.
    pmodel = model
    '''if scheduler.get_lr()[0] < lr_max:
        scheduler.step()'''

    top1 = imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer(encoder, 5, 3)
    '''print('init param data check :')
    for f in model.parameters():
        if f.requires_grad:
            print(f.data.size())'''

    for epoch in range(max_epoch):

        #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())
        #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1
        mx = len(train_loader)
        for i, (img, verb, roles, labels) in enumerate(train_loader):
            #print("epoch{}-{}/{} batches\r".format(epoch,i+1,mx)) ,
            t0 = time.time()
            t1 = time.time()
            total_steps += 1

            # torch.autograd.Variable is a no-op since torch 0.4 (kept as-is).
            if gpu_mode >= 0:
                img = torch.autograd.Variable(img.cuda())
                roles = torch.autograd.Variable(roles.cuda())
                verb = torch.autograd.Variable(verb.cuda())
                labels = torch.autograd.Variable(labels.cuda())
            else:
                img = torch.autograd.Variable(img)
                verb = torch.autograd.Variable(verb)
                roles = torch.autograd.Variable(roles)
                labels = torch.autograd.Variable(labels)
            '''print('all inputs')
            print(img)
            print('=========================================================================')
            print(verb)
            print('=========================================================================')
            print(roles)
            print('=========================================================================')
            print(labels)'''

            verb_predict, role_predict = pmodel(img, verb, roles)
            #verb_predict, rol1pred, role_predict = pmodel.forward_eval5(img)
            #print ("forward time = {}".format(time.time() - t1))
            t1 = time.time()
            '''g = make_dot(verb_predict, model.state_dict())
            g.view()'''

            loss = model.calculate_loss(verb_predict, verb, role_predict,
                                        labels, args)
            #loss = model.calculate_eval_loss_new(verb_predict, verb, rol1pred, labels, args)
            #loss = loss_ * random.random() #try random loss
            #print ("loss time = {}".format(time.time() - t1))
            t1 = time.time()
            #print('current loss = ', loss)

            loss.backward()
            #print ("backward time = {}".format(time.time() - t1))

            # Gradient clipping is intentionally disabled here.
            #torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)
            '''for param in filter(lambda p: p.requires_grad,model.parameters()):
                print(param.grad.data.sum())'''

            #start debugger
            #import pdb; pdb.set_trace()

            optimizer.step()
            optimizer.zero_grad()
            '''print('grad check :')
            for f in model.parameters():
                print('data is')
                print(f.data)
                print('grad is')
                print(f.grad)'''

            train_loss += loss.item()

            #top1.add_point_eval5(verb_predict, verb, role_predict, labels)
            #top5.add_point_eval5(verb_predict, verb, role_predict, labels)

            top1.add_point(verb_predict, verb, role_predict, labels)
            top5.add_point(verb_predict, verb, role_predict, labels)

            if total_steps % print_freq == 0:
                top1_a = top1.get_average_results()
                top5_a = top5.get_average_results()
                # NOTE(review): the avg-loss denominator is
                # (total_steps - 1) % eval_frequency, i.e. one less than
                # the steps since the last reset — confirm intended.
                print("{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}".
                      format(total_steps - 1, epoch, i,
                             utils.format_dict(top1_a, "{:.2f}", "1-"),
                             utils.format_dict(top5_a, "{:.2f}", "5-"),
                             loss.item(), train_loss /
                             ((total_steps - 1) % eval_frequency)))

            if total_steps % eval_frequency == 0:
                # eval() returns fresh scorers; they replace the training
                # scorers until the reset below.
                top1, top5, val_loss = eval(model, dev_loader, encoder,
                                            gpu_mode)
                model.train()

                top1_avg = top1.get_average_results()
                top5_avg = top5.get_average_results()

                # NOTE(review): sums 6 terms but divides by 8 — looks like
                # the value*/value-all* terms were dropped without updating
                # the divisor (cf. the commented-out 8-term version below);
                # confirm which was intended.
                avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                            top5_avg["value"] + top5_avg["value-all"]
                avg_score /= 8

                print('Dev {} average :{:.2f} {} {}'.format(
                    total_steps - 1, avg_score * 100,
                    utils.format_dict(top1_avg, '{:.2f}', '1-'),
                    utils.format_dict(top5_avg, '{:.2f}', '5-')))
                #print('Dev loss :', val_loss)

                dev_score_list.append(avg_score)
                max_score = max(dev_score_list)

                # Save only when this eval produced a new best dev score.
                if max_score == dev_score_list[-1]:
                    torch.save(
                        model.state_dict(), model_dir +
                        "/{}_macnet4layer_gtv_finetune.model".format(
                            model_name))
                    print('New best model saved! {0}'.format(max_score))

                #eval on the trainset
                '''top1, top5, val_loss = eval(model, traindev_loader, encoder, gpu_mode)
                model.train()

                top1_avg = top1.get_average_results()
                top5_avg = top5.get_average_results()

                avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                            top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
                avg_score /= 8

                print ('TRAINDEV {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,
                                                                  utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                                  utils.format_dict(top5_avg, '{:.2f}', '5-')))'''

                # Reset the running loss and training scorers for the next
                # eval window.
                print('current train loss', train_loss)
                train_loss = 0
                top1 = imsitu_scorer(encoder, 1, 3)
                top5 = imsitu_scorer(encoder, 5, 3)

            del verb_predict, role_predict, loss, img, verb, roles, labels
            #break
        print('Epoch ', epoch, ' completed!')
        scheduler.step()
def train(model,
          train_loader,
          dev_loader,
          traindev_loader,
          optimizer,
          scheduler,
          max_epoch,
          model_dir,
          encoder,
          gpu_mode,
          clip_norm,
          lr_max,
          model_name,
          args,
          eval_frequency=2000):
    """Train a verb-only model on (id, img, verb, verb-question) batches.

    NOTE(review): redefines ``train`` from earlier in this module — only
    the last definition survives at import time; confirm which one is used.

    :param eval_frequency: number of optimizer steps between dev evals
    """
    model.train()
    train_loss = 0
    total_steps = 0
    print_freq = 400
    dev_score_list = []
    '''if model.gpu_mode >= 0 :
        ngpus = 2
        device_array = [i for i in range(0,ngpus)]

        pmodel = torch.nn.DataParallel(model, device_ids=device_array)
    else:
        pmodel = model'''
    # DataParallel path is disabled; train on a single device.
    pmodel = model

    top1 = imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer(encoder, 5, 3)
    '''print('init param data check :')
    for f in model.parameters():
        if f.requires_grad:
            print(f.data.size())'''

    for epoch in range(max_epoch):
        #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())
        #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1
        mx = len(train_loader)
        for i, (_id, img, verb, verbq) in enumerate(train_loader):
            #print("epoch{}-{}/{} batches\r".format(epoch,i+1,mx)) ,
            total_steps += 1
            # torch.autograd.Variable is a no-op since torch 0.4 (kept as-is).
            if gpu_mode >= 0:
                img = torch.autograd.Variable(img.cuda())
                verb = torch.autograd.Variable(verb.cuda())
                verbq = torch.autograd.Variable(verbq.cuda())
            else:
                img = torch.autograd.Variable(img)
                verb = torch.autograd.Variable(verb)
                verbq = torch.autograd.Variable(verbq)

            #optimizer.zero_grad()
            '''print('all inputs')
            print(img)
            print('=========================================================================')
            print(verb)
            print('=========================================================================')
            print(roles)
            print('=========================================================================')
            print(labels)'''

            verb_predict = pmodel(img, verbq)
            '''g = make_dot(verb_predict, model.state_dict())
            g.view()'''

            loss = model.calculate_loss(verb_predict, verb)
            #print('current loss = ', loss)

            loss.backward()

            # torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)

            optimizer.step()
            optimizer.zero_grad()

            train_loss += loss.item()

            top1.add_point_verb_only(verb_predict, verb)
            top5.add_point_verb_only(verb_predict, verb)

            if total_steps % print_freq == 0:
                top1_a = top1.get_average_results()
                top5_a = top5.get_average_results()
                # NOTE(review): the avg-loss denominator is
                # (total_steps - 1) % eval_frequency — one less than the
                # steps since the last reset; confirm intended.
                print("{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}".
                      format(total_steps - 1, epoch, i,
                             utils.format_dict(top1_a, "{:.2f}", "1-"),
                             utils.format_dict(top5_a, "{:.2f}", "5-"),
                             loss.item(), train_loss /
                             ((total_steps - 1) % eval_frequency)))

            if total_steps % eval_frequency == 0:
                # eval() returns fresh scorers; they replace the training
                # scorers until the reset below.
                top1, top5, val_loss = eval(model, dev_loader, encoder,
                                            gpu_mode)
                model.train()

                top1_avg = top1.get_average_results()
                top5_avg = top5.get_average_results()
                #todo : top 5

                # NOTE(review): sums 8 terms but divides by 2, and relies
                # on the scorer exposing "value*"/"value-all*" keys —
                # confirm both against imsitu_scorer.get_average_results().
                avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                            top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
                #avg_score = top1_avg["value*"]
                avg_score /= 2

                print('Dev {} average :{:.2f} {} {}'.format(
                    total_steps - 1, avg_score * 100,
                    utils.format_dict(top1_avg, '{:.2f}', '1-'),
                    utils.format_dict(top5_avg, '{:.2f}', '5-')))
                #print('Dev loss :', val_loss)

                dev_score_list.append(avg_score)
                max_score = max(dev_score_list)

                # Save only when this eval produced a new best dev score.
                if max_score == dev_score_list[-1]:
                    torch.save(
                        model.state_dict(), model_dir +
                        "/{}_agent2verbq_td_predagent_agentobj.model".format(
                            model_name))
                    print('New best model saved! {0}'.format(max_score))

                #eval on the trainset
                '''top1, top5, val_loss = eval(model, traindev_loader, encoder, gpu_mode)
                model.train()

                top1_avg = top1.get_average_results()
                top5_avg = top5.get_average_results()

                avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                            top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
                avg_score /= 8

                print ('TRAINDEV {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,
                                                                  utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                                  utils.format_dict(top5_avg, '{:.2f}', '5-')))'''

                # Reset the running loss and training scorers for the next
                # eval window.
                print('current train loss', train_loss)
                train_loss = 0
                top1 = imsitu_scorer(encoder, 1, 3)
                top5 = imsitu_scorer(encoder, 5, 3)

            # NOTE(review): unlike the other loops, verb/verbq/verb_predict
            # are not deleted here — only loss and img.
            del loss, img
            #break
        print('Epoch ', epoch, ' completed!')
        scheduler.step()
def train(model,
          train_loader,
          dev_loader,
          optimizer,
          scheduler,
          max_epoch,
          model_dir,
          encoder,
          gpu_mode,
          eval_frequency=500):
    """Train a verb+role model, saving an .h5 checkpoint on each new best
    dev score.

    NOTE(review): another redefinition of ``train`` in this module; only
    the last definition survives at import time.

    :param eval_frequency: number of optimizer steps between dev evals
    """
    model.train()
    train_loss = 0
    total_steps = 0
    print_freq = 50
    dev_score_list = []

    top1 = imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer(encoder, 5, 3)
    '''print('init param data check :')
    for f in model.parameters():
        if f.requires_grad:
            print(f.data.size())'''

    for epoch in range(max_epoch):
        #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())
        #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1
        mx = len(train_loader)
        for i, (img, verb, roles, labels) in enumerate(train_loader):
            #print("epoch{}-{}/{} batches\r".format(epoch,i+1,mx)) ,
            total_steps += 1

            # torch.autograd.Variable is a no-op since torch 0.4 (kept as-is).
            if gpu_mode >= 0:
                img = torch.autograd.Variable(img.cuda())
                roles = torch.autograd.Variable(roles.cuda())
                verb = torch.autograd.Variable(verb.cuda())
                labels = torch.autograd.Variable(labels.cuda())
            else:
                img = torch.autograd.Variable(img)
                verb = torch.autograd.Variable(verb)
                roles = torch.autograd.Variable(roles)
                labels = torch.autograd.Variable(labels)

            optimizer.zero_grad()

            verb_predict, role_predict = model(img, verb, roles)

            loss = model.calculate_loss(verb_predict, verb, role_predict,
                                        labels)
            #print('current loss = ', loss)

            loss.backward()
            optimizer.step()
            '''print('grad check :')
            for f in model.parameters():
                print('data is')
                print(f.data)
                print('grad is')
                print(f.grad)'''

            # .data.item() is dated style (plain .item() elsewhere in file).
            train_loss += loss.data.item()

            top1.add_point(verb_predict, verb, role_predict, labels, roles)
            top5.add_point(verb_predict, verb, role_predict, labels, roles)

            if total_steps % print_freq == 0:
                top1_a = top1.get_average_results()
                top5_a = top5.get_average_results()
                # NOTE(review): avg-loss denominator is
                # (total_steps - 1) % eval_frequency — confirm intended.
                print("{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}".
                      format(total_steps - 1, epoch, i,
                             utils.format_dict(top1_a, "{:.2f}", "1-"),
                             utils.format_dict(top5_a, "{:.2f}", "5-"),
                             loss.item(), train_loss /
                             ((total_steps - 1) % eval_frequency)))

            if total_steps % eval_frequency == 0:
                # eval() returns fresh scorers; they replace the training
                # scorers until the reset below.
                top1, top5, val_loss = eval(model, dev_loader, encoder,
                                            gpu_mode)
                model.train()

                top1_avg = top1.get_average_results()
                top5_avg = top5.get_average_results()

                # 8-term sum averaged over 8 components (consistent here).
                avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                            top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
                avg_score /= 8

                print('Dev {} average :{:.2f} {} {}'.format(
                    total_steps - 1, avg_score * 100,
                    utils.format_dict(top1_avg, '{:.2f}', '1-'),
                    utils.format_dict(top5_avg, '{:.2f}', '5-')))
                print('Dev loss :', val_loss)

                dev_score_list.append(avg_score)
                max_score = max(dev_score_list)

                # Save only when this eval produced a new best dev score.
                if max_score == dev_score_list[-1]:
                    checkpoint_name = os.path.join(
                        model_dir, '{}_devloss_cnngraph_{}.h5'.format(
                            'baseline', len(dev_score_list)))
                    utils.save_net(checkpoint_name, model)
                    print('New best model saved! {0}'.format(max_score))

                # Reset the running loss and training scorers for the next
                # eval window.
                print('current train loss', train_loss)
                train_loss = 0
                top1 = imsitu_scorer(encoder, 1, 3)
                top5 = imsitu_scorer(encoder, 5, 3)

            del verb_predict, role_predict, loss, img, verb, roles, labels
            #break
        scheduler.step()
        print('Epoch ', epoch, ' completed!')
# Exemple #5
# 0
def eval(model, checkpoint_list, model_dir, model_name, dev_loader, encoder,
         gpu_mode):
    """Ensemble-evaluate verb prediction across several saved checkpoints.

    Loads one model copy per checkpoint, averages their per-verb scores on
    each dev batch, and scores the averaged prediction.

    :param model: template network; its architecture is copied per checkpoint
    :param checkpoint_list: checkpoint ids substituted into ``model_name``
    :param model_dir: directory prefix for checkpoint files
    :param model_name: format string producing a checkpoint filename
    :param dev_loader: iterable of ``(img, verb, roles, labels)`` batches
    :param encoder: dataset encoder handed to ``imsitu_scorer``
    :param gpu_mode: ``>= 0`` moves tensors to CUDA, otherwise CPU
    :return: ``(top1 scorer, top5 scorer, 0)`` — loss computation disabled
    """
    import copy  # local import: needed for per-checkpoint model copies

    model.eval()
    val_loss = 0

    # BUG FIX: the original did `new_model = model`, so every list entry
    # aliased the same object and only the LAST checkpoint's weights
    # survived; the batch loop then also called `model(img)` for every
    # member, averaging N identical predictions. Deep-copy per checkpoint
    # so each ensemble member keeps its own weights.
    model_list = []
    for chkp in checkpoint_list:
        new_model = copy.deepcopy(model)
        new_model.load_state_dict(
            torch.load(model_dir + model_name.format(chkp)))
        new_model.eval()
        model_list.append(new_model)

    print('evaluating model...')
    top1 = imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer(encoder, 5, 3)
    with torch.no_grad():
        mx = len(dev_loader)
        for i, (img, verb, roles, labels) in enumerate(dev_loader):
            # torch.autograd.Variable is a no-op since torch 0.4; just
            # move tensors to the right device.
            if gpu_mode >= 0:
                img = img.cuda()
                roles = roles.cuda()
                verb = verb.cuda()
                labels = labels.cuda()

            batch_size = img.size(0)
            verb_count = 504  # presumably the imSitu verb vocabulary size — confirm

            # Run every ensemble member on the batch and average their
            # predictions: (batch, n_models, verb_count) -> (batch, verb_count)
            verb_predict = torch.cat([m(img) for m in model_list], 1)
            verb_pred_all = verb_predict.view(batch_size, -1, verb_count)
            ensemble = torch.mean(verb_pred_all, 1)

            top1.add_point_verb_only(ensemble, verb)
            top5.add_point_verb_only(ensemble, verb)

            # Free batch tensors eagerly to keep GPU memory flat.
            del verb_predict, verb_pred_all, ensemble, img, verb, roles, labels

    return top1, top5, 0
# Exemple #6
# 0
def train(model,
          train_loader,
          dev_loader,
          traindev_loader,
          optimizer,
          scheduler,
          max_epoch,
          model_dir,
          encoder,
          gpu_mode,
          clip_norm,
          lr_max,
          checkpoint_at,
          eval_frequency=4000):
    """Verb-only training with cyclic checkpointing: every
    ``checkpoint_at`` steps a checkpoint is saved and the ensemble of all
    checkpoints so far is evaluated on dev.

    NOTE(review): another redefinition of ``train``; only the last
    definition in this module survives at import time.

    :param checkpoint_at: steps per checkpoint/ensemble-eval cycle
    :param eval_frequency: only used in the avg-loss denominator here
    """
    model.train()
    train_loss = 0
    total_steps = 0
    # NOTE(review): print_freq=5 is very chatty — likely a debug setting.
    print_freq = 5
    dev_score_list = []
    checkp_list = []
    '''if model.gpu_mode >= 0 :
        ngpus = 2
        device_array = [i for i in range(0,ngpus)]

        pmodel = torch.nn.DataParallel(model, device_ids=device_array)
    else:
        pmodel = model'''
    # DataParallel path is disabled; train on a single device.
    pmodel = model
    '''if scheduler.get_lr()[0] < lr_max:
        scheduler.step()'''

    top1 = imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer(encoder, 5, 3)
    '''print('init param data check :')
    for f in model.parameters():
        if f.requires_grad:
            print(f.data.size())'''

    for epoch in range(max_epoch):
        #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())
        #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1
        mx = len(train_loader)
        for i, (img, verb, roles, labels) in enumerate(train_loader):
            #print("epoch{}-{}/{} batches\r".format(epoch,i+1,mx)) ,
            total_steps += 1

            # torch.autograd.Variable is a no-op since torch 0.4 (kept as-is).
            if gpu_mode >= 0:
                img = torch.autograd.Variable(img.cuda())
                roles = torch.autograd.Variable(roles.cuda())
                verb = torch.autograd.Variable(verb.cuda())
                labels = torch.autograd.Variable(labels.cuda())
            else:
                img = torch.autograd.Variable(img)
                verb = torch.autograd.Variable(verb)
                roles = torch.autograd.Variable(roles)
                labels = torch.autograd.Variable(labels)

            #optimizer.zero_grad()
            '''print('all inputs')
            print(img)
            print('=========================================================================')
            print(verb)
            print('=========================================================================')
            print(roles)
            print('=========================================================================')
            print(labels)'''

            verb_predict = pmodel(img)
            '''g = make_dot(verb_predict, model.state_dict())
            g.view()'''

            loss = model.calculate_loss(verb_predict, verb)
            #print('current loss = ', loss)

            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)
            '''for param in filter(lambda p: p.requires_grad,model.parameters()):
                print(param.grad.data.sum())'''

            #start debugger
            #import pdb; pdb.set_trace()

            optimizer.step()
            # NOTE(review): `optimizer.optimizer.zero_grad()` implies
            # `optimizer` is a wrapper exposing an inner `.optimizer`
            # (e.g. a warm-restart/LR wrapper) — confirm against the caller.
            optimizer.optimizer.zero_grad()
            #optimizer.step()
            #optimizer.zero_grad()
            '''print('grad check :')
            for f in model.parameters():
                print('data is')
                print(f.data)
                print('grad is')
                print(f.grad)'''

            train_loss += loss.item()

            top1.add_point_verb_only(verb_predict, verb)
            top5.add_point_verb_only(verb_predict, verb)

            if total_steps % print_freq == 0:
                top1_a = top1.get_average_results()
                top5_a = top5.get_average_results()
                # NOTE(review): avg-loss denominator is
                # (total_steps - 1) % eval_frequency — confirm intended.
                print("{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}".
                      format(total_steps - 1, epoch, i,
                             utils.format_dict(top1_a, "{:.2f}", "1-"),
                             utils.format_dict(top5_a, "{:.2f}", "5-"),
                             loss.item(), train_loss /
                             ((total_steps - 1) % eval_frequency)))

            if total_steps % checkpoint_at == 0:
                #assuming this is the best model at each cycle
                checkpoint_number = math.ceil(total_steps / checkpoint_at)
                torch.save(
                    model.state_dict(), model_dir +
                    "/verb_only256_cosanwr_p1_chkp{0}.model".format(
                        checkpoint_number))
                print('New model saved at cycle {0}'.format(checkpoint_number))
                checkp_list.append(checkpoint_number)

                # Ensemble-evaluate all checkpoints saved so far; the
                # returned scorers replace the training scorers until the
                # reset below.
                top1, top5, val_loss = eval(
                    model, checkp_list, model_dir,
                    "/verb_only256_cosanwr_p1_chkp{0}.model", dev_loader,
                    encoder, gpu_mode)
                model.train()

                top1_avg = top1.get_average_results()
                top5_avg = top5.get_average_results()

                # Verb-only metric: mean of top-1 and top-5 verb accuracy.
                avg_score = top1_avg["verb"] + top5_avg["verb"]
                avg_score /= 2

                print('Dev {} average :{:.2f} {} {}'.format(
                    total_steps - 1, avg_score * 100,
                    utils.format_dict(top1_avg, '{:.2f}', '1-'),
                    utils.format_dict(top5_avg, '{:.2f}', '5-')))

                # Reset the running loss and training scorers.
                print('current train loss', train_loss)
                train_loss = 0
                top1 = imsitu_scorer(encoder, 1, 3)
                top5 = imsitu_scorer(encoder, 5, 3)

            del verb_predict, loss, img, verb, roles, labels
            #break
        print('Epoch ', epoch, ' completed!')
def train(model,
          train_loader,
          dev_loader,
          traindev_loader,
          optimizer,
          scheduler,
          max_epoch,
          model_dir,
          encoder,
          gpu_mode,
          clip_norm,
          lr_max,
          eval_frequency=4000):
    """Train a verb model with a triplet ranking loss on positive/negative
    image-verb pairs plus a classification loss; dev eval runs once per
    epoch (not per step).

    NOTE(review): another redefinition of ``train``; being last in the
    visible chunk, this is likely the definition importers actually get.

    :param eval_frequency: only used in the avg-loss denominator here
    """
    model.train()
    train_loss = 0
    total_steps = 0
    print_freq = 400
    dev_score_list = []
    '''if model.gpu_mode >= 0 :
        ngpus = 2
        device_array = [i for i in range(0,ngpus)]

        pmodel = torch.nn.DataParallel(model, device_ids=device_array)
    else:
        pmodel = model'''
    # DataParallel path is disabled; train on a single device.
    pmodel = model

    top1 = imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer(encoder, 5, 3)
    '''print('init param data check :')
    for f in model.parameters():
        if f.requires_grad:
            print(f.data.size())'''

    for epoch in range(max_epoch):
        #print('current sample : ', i, img.size(), verb.size(), roles.size(), labels.size())
        #sizes batch_size*3*height*width, batch*504*1, batch*6*190*1, batch*3*6*lebale_count*1
        mx = len(train_loader)
        # p_* are positive pairs, n_* are negatives for the triplet loss.
        for i, (p_img, p_verb, n_img, n_verb) in enumerate(train_loader):
            #print("epoch{}-{}/{} batches\r".format(epoch,i+1,mx)) ,
            total_steps += 1
            # torch.autograd.Variable is a no-op since torch 0.4 (kept as-is).
            if gpu_mode >= 0:
                p_img = torch.autograd.Variable(p_img.cuda())
                p_verb = torch.autograd.Variable(p_verb.cuda())
                n_img = torch.autograd.Variable(n_img.cuda())
                n_verb = torch.autograd.Variable(n_verb.cuda())
            else:
                p_img = torch.autograd.Variable(p_img)
                p_verb = torch.autograd.Variable(p_verb)
                n_img = torch.autograd.Variable(n_img)
                n_verb = torch.autograd.Variable(n_verb)

            #optimizer.zero_grad()
            '''print('all inputs')
            print(img)
            print('=========================================================================')
            print(verb)
            print('=========================================================================')
            print(roles)
            print('=========================================================================')
            print(labels)'''

            # Embeddings for the ranking loss and classifier scores for
            # the cross-entropy term.
            p_img_rep, p_verb_rep = pmodel(p_img, p_verb)
            n_img_rep, n_verb_rep = pmodel(n_img, n_verb)
            p_img_pred = pmodel.classifier_forward(p_img)
            n_img_pred = pmodel.classifier_forward(n_img)
            #print('came here')
            '''g = make_dot(verb_predict, model.state_dict())
            g.view()'''

            # Classify positives and negatives together against their verbs.
            predicted_labels = torch.cat([p_img_pred, n_img_pred], 0)
            gt_labels = torch.cat([p_verb, n_verb], 0)

            p_loss = model.calculate_loss(predicted_labels, gt_labels)
            #n_loss = model.calculate_loss(n_img_pred, n_verb)
            triplet_loss = model.triplet_loss(p_img_rep, p_verb_rep, n_img_rep,
                                              n_verb_rep)
            loss = triplet_loss + p_loss
            #loss = p_loss
            #loss = triplet_loss
            #print('current loss = ', loss)

            loss.backward()
            #print('done')

            # torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)

            optimizer.step()
            optimizer.zero_grad()

            train_loss += loss.item()

            # Only positives are scored during training.
            top1.add_point_verb_only(p_img_pred, p_verb)
            top5.add_point_verb_only(p_img_pred, p_verb)

            if total_steps % print_freq == 0:
                top1_a = top1.get_average_results()
                top5_a = top5.get_average_results()
                # NOTE(review): avg-loss denominator is
                # (total_steps - 1) % eval_frequency — confirm intended.
                print("{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}".
                      format(total_steps - 1, epoch, i,
                             utils.format_dict(top1_a, "{:.2f}", "1-"),
                             utils.format_dict(top5_a, "{:.2f}", "5-"),
                             loss.item(), train_loss /
                             ((total_steps - 1) % eval_frequency)))

            #del verb_predict, loss, img, verb, roles, labels
            #break
        print('Epoch ', epoch, ' completed!')

        # Per-epoch dev evaluation; the returned scorers replace the
        # training scorers until the reset below.
        top1, top5, val_loss = eval(model, dev_loader, encoder, gpu_mode)
        model.train()

        top1_avg = top1.get_average_results()
        top5_avg = top5.get_average_results()

        # NOTE(review): sums 2 terms but divides by 8 — probably copied
        # from the 8-term variant above; confirm whether /= 2 was intended.
        avg_score = top1_avg["verb"] + top5_avg["verb"]
        avg_score /= 8

        print('Dev {} average :{:.2f} {} {}'.format(
            total_steps - 1, avg_score * 100,
            utils.format_dict(top1_avg, '{:.2f}', '1-'),
            utils.format_dict(top5_avg, '{:.2f}', '5-')))

        dev_score_list.append(avg_score)
        max_score = max(dev_score_list)

        # Save only when this epoch produced a new best dev score.
        if max_score == dev_score_list[-1]:
            torch.save(model.state_dict(),
                       model_dir + "/verbonly_vgg16_rank.model")
            print('New best model saved! {0}'.format(max_score))

        # Reset the running loss and training scorers for the next epoch.
        print('current train loss', train_loss)
        train_loss = 0
        top1 = imsitu_scorer(encoder, 1, 3)
        top5 = imsitu_scorer(encoder, 5, 3)

        scheduler.step()