Code Example #1
def training():
	global count, val_acc, val_err
	model.train()
	bar = progressbar.ProgressBar(max_value=max_iterations)
	for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
		# forward steps
		# output
		label_real = Variable(sample_real['label'].squeeze().cuda())
		ydata_bin_real = Variable(sample_real['ydata_bin'].cuda())
		ydata_real = Variable(sample_real['ydata'].cuda())
		xdata_real = Variable(sample_real['xdata'].cuda())
		output_real = model(xdata_real)
		output_cat_real = output_real[0]
		output_bin_real = output_real[1]
		output_res_real = output_real[2]
		label_render = Variable(sample_render['label'].squeeze().cuda())
		ydata_bin_render = Variable(sample_render['ydata_bin'].cuda())
		ydata_render = Variable(sample_render['ydata'].cuda())
		xdata_render = Variable(sample_render['xdata'].cuda())
		output_render = model(xdata_render)
		output_cat_render = output_render[0]
		output_bin_render = output_render[1]
		output_res_render = output_render[2]
		output_bin = torch.cat((output_bin_real, output_bin_render))
		output_res = torch.cat((output_res_real, output_res_render))
		ydata_bin = torch.cat((ydata_bin_real, ydata_bin_render))
		ydata = torch.cat((ydata_real, ydata_render))
		# loss
		Lc_cat = ce_loss(output_cat_real, label_real)   # use only real images for category loss
		Lc_pose = ce_loss(output_bin, ydata_bin)        # use all images for pose loss - bin part
		ind = torch.argmax(output_bin, dim=1)           # most likely pose bin per sample
		y = torch.index_select(cluster_centers_, 0, ind) + output_res  # bin center plus predicted residual
		Lr = gve_loss(y, ydata)                         # gve loss on the final pose estimate
		loss = 0.1*Lc_cat + Lc_pose + args.alpha*Lr
		# parameter updates
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
		# store
		count += 1
		writer.add_scalar('train_loss', loss.item(), count)
		if i % 1000 == 0:
			ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
			spio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})
			tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
			tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)
			writer.add_scalar('val_acc', tmp_acc, count)
			writer.add_scalar('val_err', tmp_err, count)
			val_acc.append(tmp_acc)
			val_err.append(tmp_err)
		# cleanup
		del label_real, ydata_bin_real, ydata_real, xdata_real, output_real, output_res_real, output_bin_real, output_cat_real
		del label_render, ydata_bin_render, ydata_render, xdata_render, output_render, output_res_render, output_bin_render, output_cat_render
		del output_bin, output_res, ydata_bin, ydata, Lc_cat, Lc_pose, Lr, loss
		bar.update(i+1)
	real_loader.dataset.shuffle_images()
	render_loader.dataset.shuffle_images()
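
Note: these loops use the pre-0.4 torch.autograd.Variable wrapper and bare .cuda() calls. A minimal sketch of the modern batch-preparation step, assuming PyTorch >= 0.4 (the sample keys and the three-headed model output mirror the loop above):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Variable is a no-op since PyTorch 0.4; tensors move to the GPU with .to(device)
label_real = sample_real['label'].squeeze().to(device)
ydata_bin_real = sample_real['ydata_bin'].to(device)
ydata_real = sample_real['ydata'].to(device)
xdata_real = sample_real['xdata'].to(device)
output_cat_real, output_bin_real, output_res_real = model(xdata_real)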
Code Example #2
def training():
	global count, val_err, val_acc
	model.train()
	bar = progressbar.ProgressBar(max_value=max_iterations)
	for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
		# forward steps
		# output
		xdata_real = Variable(sample_real['xdata'].cuda())
		label_real = Variable(sample_real['label'].cuda())
		ydata_bin_real = Variable(sample_real['ydata_bin'].cuda())
		ydata_real = Variable(sample_real['ydata'].cuda())
		output_real = model(xdata_real)
		xdata_render = Variable(sample_render['xdata'].cuda())
		label_render = Variable(sample_render['label'].cuda())
		ydata_bin_render = Variable(sample_render['ydata_bin'].cuda())
		ydata_render = Variable(sample_render['ydata'].cuda())
		output_render = model(xdata_render)
		# loss
		ydata_bin = torch.cat((ydata_bin_real, ydata_bin_render))
		ydata = torch.cat((ydata_real, ydata_render))
		output_bin = torch.cat((output_real[1], output_render[1]))
		ind = torch.argmax(output_bin, dim=1)
		y = torch.index_select(cluster_centers_, 0, ind)
		output = y + torch.cat((output_real[2], output_render[2]))
		Lc_cat = ce_loss(output_real[0], label_real.squeeze())
		Lc = ce_loss(output_bin, ydata_bin)
		Lr = gve_loss(output, ydata)
		loss = 0.1*Lc_cat + Lc + 10*Lr
		# parameter updates
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
		# store
		count += 1
		writer.add_scalar('train_loss', loss.item(), count)
		if i % 1000 == 0:
			ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
			spio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})
			tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
			tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)
			writer.add_scalar('val_acc', tmp_acc, count)
			writer.add_scalar('val_err', tmp_err, count)
			val_acc.append(tmp_acc)
			val_err.append(tmp_err)
		# cleanup
		del xdata_real, xdata_render, label_real, label_render, ydata_bin_real, ydata_bin_render
		del ydata_bin, ydata, output_bin, output, ydata_real, ydata_render
		del output_real, output_render, loss, sample_real, sample_render, Lr, Lc, Lc_cat
		bar.update(i + 1)
		# stop after exactly max_iterations batches (enumerate starts at 0)
		if i + 1 >= max_iterations:
			break
	render_loader.dataset.shuffle_images()
	real_loader.dataset.shuffle_images()
Code Example #3
def training():
    global count, val_acc, val_err  #, s
    model.train()
    bar = progressbar.ProgressBar(max_value=len(train_loader))
    for i, sample in enumerate(train_loader):
        # forward steps
        # output
        label = Variable(sample['label'].squeeze().cuda())
        ydata_bin = Variable(sample['ydata_bin'].cuda())
        ydata = Variable(sample['ydata'].cuda())
        xdata = Variable(sample['xdata'].cuda())
        output = model(xdata)
        output_cat = output[0]
        output_bin = output[1]
        output_res = output[2]
        # loss
        Lc_cat = ce_loss(output_cat, label)
        Lc_pose = ce_loss(output_bin, ydata_bin)
        ind = torch.argmax(output_bin, dim=1)
        y = torch.index_select(cluster_centers_, 0, ind) + output_res
        Lr = gve_loss(y, ydata)
        # loss = 0.1*Lc_cat + Lc_pose + math.exp(-s)*Lr + s
        loss = 0.1 * Lc_cat + Lc_pose + Lr
        # parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # s = math.log(Lr)
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        # writer.add_scalar('alpha', math.exp(-s), count)
        if i % 1000 == 0:
            ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
            spio.savemat(
                results_file, {
                    'ytrue_cat': ytrue_cat,
                    'ytrue_pose': ytrue_pose,
                    'ypred_cat': ypred_cat,
                    'ypred_pose': ypred_pose
                })
            tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
            tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat,
                                 num_classes)
            writer.add_scalar('val_acc', tmp_acc, count)
            writer.add_scalar('val_err', tmp_err, count)
            val_acc.append(tmp_acc)
            val_err.append(tmp_err)
        # cleanup
        del xdata, label, output, loss, output_cat, output_bin, output_res
        bar.update(i + 1)
    train_loader.dataset.shuffle_images()
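
Code Example #3 keeps commented-out lines for an adaptive weight on the regression term (math.exp(-s)*Lr + s). A minimal sketch of one way to make that weight learnable, assuming the intent is uncertainty-style loss weighting; s as a learnable parameter is a hypothetical addition, not part of the original script:

# s acts as a log-variance: exp(-s) down-weights Lr, and the trailing +s
# penalizes the trivial solution of driving s to infinity
s = torch.nn.Parameter(torch.zeros(1, device='cuda'))
optimizer.add_param_group({'params': [s]})
# inside the loop, in place of the fixed-weight loss:
loss = 0.1 * Lc_cat + Lc_pose + torch.exp(-s) * Lr + s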
Code Example #4
def training():
    global count, val_err, val_acc
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        ydata_real = Variable(sample_real['ydata'].cuda())
        output_real = model(xdata_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        ydata_render = Variable(sample_render['ydata'].cuda())
        output_render = model(xdata_render)
        output_pose = torch.cat((output_real[1], output_render[1]))
        gt_pose = torch.cat((ydata_real, ydata_render))
        Lr = gve_loss(output_pose, gt_pose)
        Lc = ce_loss(output_real[0], label_real.squeeze())
        loss = 0.1 * Lc + Lr
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        if i % 1000 == 0:
            ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
            spio.savemat(
                results_file, {
                    'ytrue_cat': ytrue_cat,
                    'ytrue_pose': ytrue_pose,
                    'ypred_cat': ypred_cat,
                    'ypred_pose': ypred_pose
                })
            tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
            tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat,
                                 num_classes)
            writer.add_scalar('val_acc', tmp_acc, count)
            writer.add_scalar('val_err', tmp_err, count)
            val_acc.append(tmp_acc)
            val_err.append(tmp_err)
        # cleanup
        del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render, Lr, Lc
        del output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose
        bar.update(i + 1)
        # stop after exactly max_iterations batches (enumerate starts at 0)
        if i + 1 >= max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
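
Note: zip(real_loader, render_loader) stops as soon as the shorter loader is exhausted, which is why Code Examples #2 and #4 also guard with max_iterations. If one dataset is much smaller, cycling it is one alternative; a sketch under that assumption, not taken from the original:

from itertools import cycle

# cycle() repeats the shorter loader indefinitely; note that it caches batches
# from its first pass, so the cycled loader's batch order then repeats
for i, (sample_real, sample_render) in enumerate(zip(real_loader, cycle(render_loader))):
    ...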
Code Example #5
    return ytrue_cat, ytrue_pose, ypred_cat, ypred_pose


def save_checkpoint(filename):
    torch.save(model.state_dict(), filename)
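

# A fuller checkpoint is a common extension (a sketch: save_full_checkpoint and
# the optimizer/scheduler entries it stores are not part of the original script):
def save_full_checkpoint(filename, epoch):
    torch.save({
        'epoch': epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
    }, filename)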


ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
spio.savemat(
    results_file, {
        'ytrue_cat': ytrue_cat,
        'ytrue_pose': ytrue_pose,
        'ypred_cat': ypred_cat,
        'ypred_pose': ypred_pose
    })
tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)
print('Acc: {0} \t Err: {1}'.format(tmp_acc, tmp_err))

for epoch in range(args.num_epochs):
    tic = time.time()
    scheduler.step()  # pre-1.1 PyTorch convention; in >= 1.1 this belongs after the epoch's training step
    # training step
    training()
    # save model at end of epoch
    save_checkpoint(model_file)
    # validation
    ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
    spio.savemat(
        results_file, {
            'ytrue_cat': ytrue_cat,
            'ytrue_pose': ytrue_pose,
            'ypred_cat': ypred_cat,
            'ypred_pose': ypred_pose
        })
Code Example #6
    ytrue = np.concatenate(ytrue)
    # model.train()
    return ytrue, ypred


def save_checkpoint(filename):
    torch.save(model.state_dict(), filename)


for epoch in range(num_epochs):
    tic = time.time()
    scheduler.step()  # pre-1.1 PyTorch convention; in >= 1.1 this belongs after the epoch's training step
    # training step
    training()
    # save model at end of epoch
    save_checkpoint(model_file)
    # evaluate
    ygt, ypred = testing()
    print('Acc: {0}'.format(get_accuracy(ygt, ypred, num_classes)))
    spio.savemat(results_file, {'ygt': ygt, 'ypred': ypred})
    # time and output
    toc = time.time() - tic
    print('Epoch: {0} in time {1}s'.format(epoch, toc))
    # cleanup
    gc.collect()

# evaluate the model
ygt, ypred = testing()
print('Acc: {0}'.format(get_accuracy(ygt, ypred, num_classes)))
spio.savemat(results_file, {'ygt': ygt, 'ypred': ypred})