def training_init():
    global count, val_loss, s
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        # outputs
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        ydata_real = [
            Variable(sample_real['ydata_bin'].cuda()),
            Variable(sample_real['ydata_res'].cuda())
        ]
        output_real = model(xdata_real, label_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        ydata_render = [
            Variable(sample_render['ydata_bin'].cuda()),
            Variable(sample_render['ydata_res'].cuda())
        ]
        output_render = model(xdata_render, label_render)
        # loss
        ydata_bin = torch.cat((ydata_real[0], ydata_render[0]))
        ydata_res = torch.cat((ydata_real[1], ydata_render[1]))
        output_bin = torch.cat((output_real[0], output_render[0]))
        output_res = torch.cat((output_real[1], output_render[1]))
        Lc = ce_loss(output_bin, ydata_bin)
        Lr = mse_loss(output_res, ydata_res)
        loss = Lc + 0.5 * math.exp(-2 * s) * Lr + s
        # parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        s = 0.5 * math.log(Lr.item())  # take the scalar loss value before log
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        writer.add_scalar('alpha', 0.5 * math.exp(-2 * s), count)
        if i % 1000 == 0:
            ytest, yhat_test, test_labels = testing()
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        # cleanup
        del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render
        del ydata_bin, ydata_res, output_bin, output_res
        del output_real, output_render, loss, sample_real, sample_render
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
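
Note: the weighting loss = Lc + 0.5 * exp(-2*s) * Lr + s used above resembles homoscedastic-uncertainty task weighting, except that s is updated in closed form (s = 0.5 * log(Lr)) after each optimizer step rather than learned by gradient. A minimal, self-contained sketch of that arithmetic; the tensors below are hypothetical stand-ins for the real ce_loss / mse_loss outputs:

import math
import torch

def weighted_two_task_loss(Lc, Lr, s):
    # combine the classification loss Lc and regression loss Lr with a
    # log-variance style weight s, mirroring the formula used above
    return Lc + 0.5 * math.exp(-2.0 * s) * Lr + s

# hypothetical scalar losses standing in for ce_loss / mse_loss outputs
Lc = torch.tensor(0.7)
Lr = torch.tensor(0.2)
s = 0.0
loss = weighted_two_task_loss(Lc, Lr, s)
# the closed-form update from the loop: with s = 0.5*log(Lr), the next
# iteration's weight 0.5*exp(-2*s) equals 0.5/Lr
s = 0.5 * math.log(Lr.item())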
Example 2
def training():
	global count, val_acc, val_err
	model.train()
	bar = progressbar.ProgressBar(max_value=max_iterations)
	for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
		# forward steps
		# output
		label_real = Variable(sample_real['label'].squeeze().cuda())
		ydata_bin_real = Variable(sample_real['ydata_bin'].cuda())
		ydata_real = Variable(sample_real['ydata'].cuda())
		xdata_real = Variable(sample_real['xdata'].cuda())
		output_real = model(xdata_real)
		output_cat_real = output_real[0]
		output_bin_real = output_real[1]
		output_res_real = output_real[2]
		label_render = Variable(sample_render['label'].squeeze().cuda())
		ydata_bin_render = Variable(sample_render['ydata_bin'].cuda())
		ydata_render = Variable(sample_render['ydata'].cuda())
		xdata_render = Variable(sample_render['xdata'].cuda())
		output_render = model(xdata_render)
		output_cat_render = output_render[0]
		output_bin_render = output_render[1]
		output_res_render = output_render[2]
		output_bin = torch.cat((output_bin_real, output_bin_render))
		output_res = torch.cat((output_res_real, output_res_render))
		ydata_bin = torch.cat((ydata_bin_real, ydata_bin_render))
		ydata = torch.cat((ydata_real, ydata_render))
		# loss
		Lc_cat = ce_loss(output_cat_real, label_real)   # use only real images for category loss
		Lc_pose = ce_loss(output_bin, ydata_bin)        # use all images for pose loss - bin part
		ind = torch.argmax(output_bin, dim=1)
		y = torch.index_select(cluster_centers_, 0, ind) + output_res
		Lr = gve_loss(y, ydata)                         # gve loss on final pose
		loss = 0.1*Lc_cat + Lc_pose + args.alpha*Lr
		# parameter updates
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
		# store
		count += 1
		writer.add_scalar('train_loss', loss.item(), count)
		if i % 1000 == 0:
			ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
			spio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})
			tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
			tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)
			writer.add_scalar('val_acc', tmp_acc, count)
			writer.add_scalar('val_err', tmp_err, count)
			val_acc.append(tmp_acc)
			val_err.append(tmp_err)
		# cleanup
		del label_real, ydata_bin_real, ydata_real, xdata_real, output_real, output_res_real, output_bin_real, output_cat_real
		del label_render, ydata_bin_render, ydata_render, xdata_render, output_render, output_res_render, output_bin_render, output_cat_render
		del output_bin, output_res, ydata_bin, ydata, Lc_cat, Lc_pose, Lr, loss
		bar.update(i+1)
	real_loader.dataset.shuffle_images()
	render_loader.dataset.shuffle_images()
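
The pose head in this example is a bin-and-residual decoder: argmax over the bin scores selects a cluster center, and the residual branch refines it. A small sketch of just that decoding step; shapes are hypothetical, and cluster_centers plays the same role as cluster_centers_ in the code above:

import torch

def decode_pose(output_bin, output_res, cluster_centers):
    # bin-and-residual decoding as used above: pick the most likely pose
    # bin, look up its cluster center, and add the predicted residual
    ind = torch.argmax(output_bin, dim=1)                   # (N,) winning bin per sample
    centers = torch.index_select(cluster_centers, 0, ind)  # (N, D) selected centers
    return centers + output_res                             # (N, D) final pose estimate

# hypothetical shapes: 4 samples, 8 bins, 3-dim pose representation
output_bin = torch.randn(4, 8)
output_res = torch.randn(4, 3) * 0.1
cluster_centers = torch.randn(8, 3)
y = decode_pose(output_bin, output_res, cluster_centers)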
Example 3
def training():
	global count, val_err, val_acc
	model.train()
	bar = progressbar.ProgressBar(max_value=max_iterations)
	for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
		# forward steps
		# output
		xdata_real = Variable(sample_real['xdata'].cuda())
		label_real = Variable(sample_real['label'].cuda())
		ydata_bin_real = Variable(sample_real['ydata_bin'].cuda())
		ydata_real = Variable(sample_real['ydata'].cuda())
		output_real = model(xdata_real)
		xdata_render = Variable(sample_render['xdata'].cuda())
		label_render = Variable(sample_render['label'].cuda())
		ydata_bin_render = Variable(sample_render['ydata_bin'].cuda())
		ydata_render = Variable(sample_render['ydata'].cuda())
		output_render = model(xdata_render)
		# loss
		ydata_bin = torch.cat((ydata_bin_real, ydata_bin_render))
		ydata = torch.cat((ydata_real, ydata_render))
		output_bin = torch.cat((output_real[1], output_render[1]))
		ind = torch.argmax(output_bin, dim=1)
		y = torch.index_select(cluster_centers_, 0, ind)
		output = y + torch.cat((output_real[2], output_render[2]))
		Lc_cat = ce_loss(output_real[0], label_real.squeeze())
		Lc = ce_loss(output_bin, ydata_bin)
		Lr = gve_loss(output, ydata)
		loss = 0.1*Lc_cat + Lc + 10*Lr
		# parameter updates
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
		# store
		count += 1
		writer.add_scalar('train_loss', loss.item(), count)
		if i % 1000 == 0:
			ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
			spio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})
			tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
			tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)
			writer.add_scalar('val_acc', tmp_acc, count)
			writer.add_scalar('val_err', tmp_err, count)
			val_acc.append(tmp_acc)
			val_err.append(tmp_err)
		# cleanup
		del xdata_real, xdata_render, label_real, label_render, ydata_bin_real, ydata_bin_render
		del ydata_bin, ydata, output_bin, output, ydata_real, ydata_render
		del output_real, output_render, loss, sample_real, sample_render, Lr, Lc, Lc_cat
		bar.update(i)
		# stop
		if i == max_iterations:
			break
	render_loader.dataset.shuffle_images()
	real_loader.dataset.shuffle_images()
Example 4
def training():
	global count, val_loss, s, num_ensemble
	model.train()
	bar = progressbar.ProgressBar(max_value=max_iterations)
	for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
		# forward steps
		# output
		xdata_real = Variable(sample_real['xdata'].cuda())
		label_real = Variable(sample_real['label'].cuda())
		ydata_real = [Variable(sample_real['ydata_bin'].cuda()), Variable(sample_real['ydata'].cuda())]
		output_real = model(xdata_real, label_real)
		xdata_render = Variable(sample_render['xdata'].cuda())
		label_render = Variable(sample_render['label'].cuda())
		ydata_render = [Variable(sample_render['ydata_bin'].cuda()), Variable(sample_render['ydata'].cuda())]
		output_render = model(xdata_render, label_render)
		# loss
		ydata_bin = torch.cat((ydata_real[0], ydata_render[0]))
		ydata = torch.cat((ydata_real[1], ydata_render[1]))
		output_bin = torch.cat((output_real[0], output_render[0]))
		_, ind = torch.max(output_bin, dim=1)
		y = torch.index_select(cluster_centers_, 0, ind)
		output = y + torch.cat((output_real[1], output_render[1]))
		Lc = ce_loss(output_bin, ydata_bin)
		Lr = l1_loss(output, ydata)
		loss = Lc + math.exp(-s)*Lr + s
		# parameter updates
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
		s = math.log(Lr.item())  # take the scalar loss value before log
		# store
		writer.add_scalar('train_loss', loss.item(), count)
		writer.add_scalar('alpha', math.exp(-s), count)
		if i % 500 == 0:
			ytest, yhat_test, test_labels = testing()
			tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
			writer.add_scalar('val_loss', tmp_val_loss, count)
			val_loss.append(tmp_val_loss)
		count += 1
		if count % optimizer.c == optimizer.c // 2:
			ytest, yhat_test, test_labels = testing()
			num_ensemble += 1
			results_file = os.path.join(results_dir, 'num' + str(num_ensemble))
			spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
		# cleanup
		del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render
		del ydata_bin, ydata, output_bin, output
		del output_real, output_render, sample_real, sample_render, loss
		bar.update(i)
		# stop
		if i == max_iterations:
			break
	render_loader.dataset.shuffle_images()
	real_loader.dataset.shuffle_images()
Example 5

def training():
    global count, val_loss
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        # output
        xdata_real = Variable(sample_real['xdata'].cuda())
        ydata_bin_real = Variable(sample_real['ydata_bin'].cuda())
        ydata_real = Variable(sample_real['ydata'].cuda())
        output_real = model(xdata_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        ydata_bin_render = Variable(sample_render['ydata_bin'].cuda())
        ydata_render = Variable(sample_render['ydata'].cuda())
        output_render = model(xdata_render)
        # loss
        ydata_bin = torch.cat((ydata_bin_real, ydata_bin_render))
        ydata = torch.cat((ydata_real, ydata_render))
        output_bin = torch.cat((output_real[0], output_render[0]))
        ind = torch.argmax(output_bin, dim=1)
        y = torch.index_select(cluster_centers_, 0, ind)
        output = y + torch.cat((output_real[1], output_render[1]))
        Lc = ce_loss(output_bin, ydata_bin)
        Lr = gve_loss(output, ydata)
        loss = Lc + 10 * Lr
        # parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        if i % 1000 == 0:
            ytest, yhat_test, test_labels = testing()
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        # cleanup
        del xdata_real, xdata_render, ydata_bin_real, ydata_bin_render
        del ydata_bin, ydata, output_bin, output, ydata_real, ydata_render
        del output_real, output_render, loss, sample_real, sample_render, Lr, Lc
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
Example 6
def training():
    global count, val_acc, val_err  #, s
    model.train()
    bar = progressbar.ProgressBar(max_value=len(train_loader))
    for i, sample in enumerate(train_loader):
        # forward steps
        # output
        label = Variable(sample['label'].squeeze().cuda())
        ydata_bin = Variable(sample['ydata_bin'].cuda())
        ydata = Variable(sample['ydata'].cuda())
        xdata = Variable(sample['xdata'].cuda())
        output = model(xdata)
        output_cat = output[0]
        output_bin = output[1]
        output_res = output[2]
        # loss
        Lc_cat = ce_loss(output_cat, label)
        Lc_pose = ce_loss(output_bin, ydata_bin)
        ind = torch.argmax(output_bin, dim=1)
        y = torch.index_select(cluster_centers_, 0, ind) + output_res
        Lr = gve_loss(y, ydata)
        # loss = 0.1*Lc_cat + Lc_pose + math.exp(-s)*Lr + s
        loss = 0.1 * Lc_cat + Lc_pose + Lr
        # parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # s = math.log(Lr)
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        # writer.add_scalar('alpha', math.exp(-s), count)
        if i % 1000 == 0:
            ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
            spio.savemat(
                results_file, {
                    'ytrue_cat': ytrue_cat,
                    'ytrue_pose': ytrue_pose,
                    'ypred_cat': ypred_cat,
                    'ypred_pose': ypred_pose
                })
            tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
            tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat,
                                 num_classes)
            writer.add_scalar('val_acc', tmp_acc, count)
            writer.add_scalar('val_err', tmp_err, count)
            val_acc.append(tmp_acc)
            val_err.append(tmp_err)
        # cleanup
        del xdata, label, output, loss, output_cat, output_bin, output_res
        bar.update(i + 1)
    train_loader.dataset.shuffle_images()
Example 7
def training():
    global count, val_err, val_acc
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        ydata_real = Variable(sample_real['ydata'].cuda())
        output_real = model(xdata_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        ydata_render = Variable(sample_render['ydata'].cuda())
        output_render = model(xdata_render)
        output_pose = torch.cat((output_real[1], output_render[1]))
        gt_pose = torch.cat((ydata_real, ydata_render))
        Lr = gve_loss(output_pose, gt_pose)
        Lc = ce_loss(output_real[0], label_real.squeeze())
        loss = 0.1 * Lc + Lr
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        if i % 1000 == 0:
            ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
            spio.savemat(
                results_file, {
                    'ytrue_cat': ytrue_cat,
                    'ytrue_pose': ytrue_pose,
                    'ypred_cat': ypred_cat,
                    'ypred_pose': ypred_pose
                })
            tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
            tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat,
                                 num_classes)
            writer.add_scalar('val_acc', tmp_acc, count)
            writer.add_scalar('val_err', tmp_err, count)
            val_acc.append(tmp_acc)
            val_err.append(tmp_err)
        # cleanup
        del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render, Lr, Lc
        del output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
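
gve_loss is not defined in any of these snippets; from its usage (a regression loss on viewpoint targets) it is presumably a geodesic viewpoint error. The sketch below is one common formulation, assuming the pose targets are axis-angle vectors; this is an assumption, not the original implementation:

import torch

def axis_angle_to_matrix(v):
    # Rodrigues' formula: (N, 3) axis-angle vectors -> (N, 3, 3) rotations
    theta = v.norm(dim=1, keepdim=True).clamp(min=1e-8)   # (N, 1) angles
    k = v / theta                                          # (N, 3) unit axes
    K = torch.zeros(v.size(0), 3, 3, device=v.device)      # skew-symmetric matrices
    K[:, 0, 1], K[:, 0, 2] = -k[:, 2], k[:, 1]
    K[:, 1, 0], K[:, 1, 2] = k[:, 2], -k[:, 0]
    K[:, 2, 0], K[:, 2, 1] = -k[:, 1], k[:, 0]
    theta = theta.unsqueeze(2)                              # (N, 1, 1) for broadcasting
    I = torch.eye(3, device=v.device).expand_as(K)
    return I + torch.sin(theta) * K + (1 - torch.cos(theta)) * (K @ K)

def geodesic_loss(y_pred, y_true):
    # mean geodesic angle between predicted and true rotations
    R1 = axis_angle_to_matrix(y_pred)
    R2 = axis_angle_to_matrix(y_true)
    # trace(R1^T R2) = 1 + 2 cos(theta) for the relative rotation
    tr = (R1.transpose(1, 2) @ R2).diagonal(dim1=1, dim2=2).sum(dim=1)
    cos = ((tr - 1) / 2).clamp(-1 + 1e-6, 1 - 1e-6)
    return torch.acos(cos).mean()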
Example 8

def training():
    global count, val_loss
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        ydata_real = [
            Variable(sample_real['ydata_bin'].cuda()),
            Variable(sample_real['ydata'].cuda())
        ]
        output_real = model(xdata_real, label_real)
        loss_real = criterion2(output_real, ydata_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        ydata_render = [
            Variable(sample_render['ydata_bin'].cuda()),
            Variable(sample_render['ydata'].cuda())
        ]
        output_render = model(xdata_render, label_render)
        loss_render = criterion2(output_render, ydata_render)
        loss = loss_real + loss_render
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # store
        writer.add_scalar('train_loss', loss.item(), count)
        if i % 1000 == 0:
            ytest, yhat_test, test_labels = testing()
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        count += 1
        # cleanup
        del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render
        del output_real, output_render, loss_real, loss_render, sample_real, sample_render, loss
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
Example 9
def training():
    global count, val_loss, num_ensemble
    model.train()
    bar = progressbar.ProgressBar(max_value=len(real_loader))
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        ydata = sample_real['ydata'].numpy()
        ydata_real = Variable(
            torch.from_numpy(kmeans.predict(ydata)).long().cuda())
        output_real = model(xdata_real, label_real)
        loss_real = criterion(output_real, ydata_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        ydata = sample_render['ydata'].numpy()
        ydata_render = Variable(
            torch.from_numpy(kmeans.predict(ydata)).long().cuda())
        output_render = model(xdata_render, label_render)
        loss_render = criterion(output_render, ydata_render)
        loss = loss_real + loss_render
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # store
        writer.add_scalar('train_loss', loss.item(), count)
        if i % 500 == 0:
            ytest, yhat_test, test_labels = testing()
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        count += 1
        if count % optimizer.c == optimizer.c // 2:
            ytest, yhat_test, test_labels = testing()
            num_ensemble += 1
            results_file = os.path.join(results_dir, 'num' + str(num_ensemble))
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
        # cleanup
        del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render
        del output_real, output_render, loss_real, loss_render, sample_real, sample_render, loss
        bar.update(i)
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
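
In this example the continuous pose targets are quantized on the fly with kmeans.predict, so pose estimation reduces to classification over pose clusters. A sketch of that binning step, assuming kmeans is a fitted sklearn KMeans model; the data below is a random stand-in:

import numpy as np
import torch
from sklearn.cluster import KMeans

# hypothetical setup: quantize continuous pose vectors into K bins so a
# classifier can be trained on bin indices, matching kmeans.predict above
num_clusters = 8
poses = np.random.randn(1000, 3).astype(np.float32)   # stand-in pose data
kmeans = KMeans(n_clusters=num_clusters, n_init=10).fit(poses)

batch = poses[:16]
bin_labels = torch.from_numpy(kmeans.predict(batch)).long()  # (16,) class ids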
Example 10
def training_init():
    global count, val_loss
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, sample in enumerate(train_loader):
        # forward steps
        # outputs
        xdata = Variable(sample['xdata'].cuda())
        ydata_bin = Variable(sample['ydata_bin'].cuda())
        ydata_res = Variable(sample['ydata_res'].cuda())
        output = model(xdata)
        # loss
        Lc = ce_loss(output[0], ydata_bin)
        Lr = mse_loss(output[1], ydata_res)
        loss = Lc + Lr
        # parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        if i % 1000 == 0:
            ytest, yhat_test, test_labels = testing()
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        # cleanup
        del xdata, ydata_bin, ydata_res, output, loss, sample, Lr, Lc
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    train_loader.dataset.shuffle_images()

Example 11

def save_checkpoint(filename):
    torch.save(model.state_dict(), filename)


ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
spio.savemat(
    results_file, {
        'ytrue_cat': ytrue_cat,
        'ytrue_pose': ytrue_pose,
        'ypred_cat': ypred_cat,
        'ypred_pose': ypred_pose
    })
tmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)
tmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)
print('Acc: {0} \t Err: {1}'.format(tmp_acc, tmp_err))

for epoch in range(args.num_epochs):
    tic = time.time()
    scheduler.step()
    # training step
    training()
    # save model at end of epoch
    save_checkpoint(model_file)
    # validation
    ytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()
    spio.savemat(
        results_file, {
            'ytrue_cat': ytrue_cat,
            'ytrue_pose': ytrue_pose,
            'ypred_cat': ypred_cat,
            'ypred_pose': ypred_pose
        })

Example 12
    ypred = np.concatenate(ypred)
    ytrue = np.concatenate(ytrue)
    labels = np.concatenate(labels)
    model.train()
    return ytrue, ypred, labels


def save_checkpoint(filename):
    torch.save(model.state_dict(), filename)


# initialization
training_init()
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(
    get_error2(ytest, yhat_test, test_labels, num_classes)))

s = 0  # reset
for epoch in range(args.num_epochs):
    tic = time.time()
    # scheduler.step()
    # training step
    training()
    # save model at end of epoch
    save_checkpoint(model_file)
    # validation
    ytest, yhat_test, test_labels = testing()
    print('\nMedErr: {0}'.format(
        get_error2(ytest, yhat_test, test_labels, num_classes)))
    # time and output
    toc = time.time() - tic
Example 13
		gc.collect()
	ypred = np.concatenate(ypred)
	ytrue = np.concatenate(ytrue)
	labels = np.concatenate(labels)
	model.train()
	return ytrue, ypred, labels


def save_checkpoint(filename):
	torch.save(model.state_dict(), filename)


# initialization
training_init()
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))

for epoch in range(args.num_epochs):
	tic = time.time()
	scheduler.step()
	# training step
	training()
	# save model at end of epoch
	save_checkpoint(model_file)
	# validation
	ytest, yhat_test, test_labels = testing()
	print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
	# time and output
	toc = time.time() - tic
	print('Epoch: {0} done in time {1}s'.format(epoch, toc))
	# cleanup
Example 14
def training():
    global count, val_loss, s
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        output_real = model(xdata_real, label_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        output_render = model(xdata_render, label_render)
        # loss
        ydata_bin = torch.cat((Variable(sample_real['ydata_bin'].cuda()),
                               Variable(sample_render['ydata_bin'].cuda())))
        output_bin = torch.cat((output_real[0], output_render[0]))
        Lc = ce_loss(output_bin, ydata_bin)
        labels = torch.argmax(output_bin, dim=1)
        labels_numpy = labels.data.cpu().numpy()
        labels = torch.zeros(labels.size(0), num_clusters).scatter_(
            1,
            labels.unsqueeze(1).data.cpu(), 1.0)
        labels = Variable(labels.unsqueeze(2).float().cuda())
        ydata_numpy = np.concatenate(
            (sample_real['ydata'].data.cpu().numpy(),
             sample_render['ydata'].data.cpu().numpy()))
        ydata_res = Variable(
            torch.from_numpy(get_residuals(ydata_numpy,
                                           labels_numpy)).float().cuda())
        output_res = torch.cat((output_real[1], output_render[1]))
        Lr = mse_loss(output_res, ydata_res)
        loss = Lc + 0.5 * math.exp(-2 * s) * Lr + s
        # parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        s = 0.5 * math.log(Lr.item())  # take the scalar loss value before log
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        writer.add_scalar('alpha', 0.5 * math.exp(-2 * s), count)
        if i % 1000 == 0:
            ytest, yhat_test, test_labels = testing()
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        # cleanup
        del xdata_real, xdata_render, label_real, label_render
        del output_bin, output_res, ydata_bin, ydata_res, labels
        del output_real, output_render, sample_real, sample_render, loss
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
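
Example 14 converts the argmax bin into an explicit one-hot matrix with scatter_, which is then used together with get_residuals to compute residual targets relative to the winning cluster. The scatter_ idiom in isolation, with hypothetical shapes:

import torch

def one_hot_from_scores(output_bin, num_clusters):
    # hard one-hot assignment from bin scores, as in the scatter_ call above
    idx = torch.argmax(output_bin, dim=1)          # (N,) winning bin per sample
    one_hot = torch.zeros(idx.size(0), num_clusters)
    one_hot.scatter_(1, idx.unsqueeze(1), 1.0)     # write 1.0 at the winning bin
    return one_hot

# hypothetical usage: 4 samples over 6 bins
scores = torch.randn(4, 6)
onehot = one_hot_from_scores(scores, 6)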
Example 15
def training():
    global count, val_loss, s, num_ensemble
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        output_real = model(xdata_real, label_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        output_render = model(xdata_render, label_render)
        # loss
        ydata_bin = torch.cat((Variable(sample_real['ydata_bin'].cuda()),
                               Variable(sample_render['ydata_bin'].cuda())))
        output_bin = torch.cat((output_real[0], output_render[0]))
        Lc = kl_div(F.log_softmax(output_bin, dim=1), ydata_bin)
        ydata = torch.cat((Variable(sample_real['ydata'].cuda()),
                           Variable(sample_render['ydata'].cuda())))
        output_res = torch.cat((output_real[1], output_render[1]))
        if not args.multires:
            Lr = torch.stack([
                gve_loss(
                    ydata,
                    output_res + cluster_centers.index_select(
                        0, Variable(j * torch.ones(1).long().cuda())))
                for j in range(num_clusters)
            ])
        else:
            y = cluster_centers + output_res
            Lr = torch.stack([
                gve_loss(
                    ydata,
                    torch.squeeze(
                        y.index_select(
                            1, Variable(j * torch.ones(1).long().cuda()))))
                for j in range(num_clusters)
            ])
        Lr = torch.mean(
            torch.sum(torch.mul(F.softmax(output_bin, dim=1), torch.t(Lr)),
                      dim=1))
        loss = Lc + math.exp(-s) * Lr + s
        # updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        s = math.log(Lr.item())  # take the scalar loss value before log
        # store
        writer.add_scalar('train_loss', loss.item(), count)
        writer.add_scalar('alpha', math.exp(-s), count)
        if i % 500 == 0:
            ytest, yhat_test, test_labels = testing()
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        count += 1
        if count % optimizer.c == optimizer.c // 2:
            ytest, yhat_test, test_labels = testing()
            num_ensemble += 1
            results_file = os.path.join(results_dir, 'num' + str(num_ensemble))
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
        # cleanup
        del xdata_real, xdata_render, label_real, label_render
        del ydata_bin, output_bin, ydata, output_res
        del output_real, output_render, sample_real, sample_render, loss
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
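
Instead of committing to the argmax bin, Example 15 evaluates the regression loss against every bin's center and weights the per-bin losses by the softmax bin probabilities, i.e. an expected loss under the soft assignment. A sketch of that reduction; shapes are hypothetical, and per_bin_loss[j, n] stands for the gve_loss of sample n evaluated against bin j's center:

import torch
import torch.nn.functional as F

def expected_bin_loss(output_bin, per_bin_loss):
    # soft-assignment version of the regression loss above: weight the
    # per-bin loss (K, N) by the softmax bin probabilities (N, K) and
    # average over the batch
    probs = F.softmax(output_bin, dim=1)          # (N, K)
    return torch.mean(torch.sum(probs * per_bin_loss.t(), dim=1))

# hypothetical shapes: 4 samples, 8 bins
output_bin = torch.randn(4, 8)
per_bin_loss = torch.rand(8, 4)
loss = expected_bin_loss(output_bin, per_bin_loss)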
Example 16
            ])
        ypred.append(tmp_ypred)
        ytrue.append(sample['ydata'].numpy())
        labels.append(sample['label'].numpy())
        del xdata, label, output, sample
        gc.collect()
    ypred = np.concatenate(ypred)
    ytrue = np.concatenate(ytrue)
    labels = np.concatenate(labels)
    model.train()
    return ytrue, ypred, labels


ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(
    get_error2(ytest, yhat_test, test_labels, num_classes)))
results_file = os.path.join(results_dir, 'num' + str(num_ensemble))
spio.savemat(results_file, {
    'ytest': ytest,
    'yhat_test': yhat_test,
    'test_labels': test_labels
})

for epoch in range(args.num_epochs):
    tic = time.time()
    # training step
    training()
    # validation
    ytest, yhat_test, test_labels = testing()
    tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
    print('\nMedErr: {0}'.format(tmp_val_loss))