Example #1
def print_result():
    num_image = 4
    img_in_list, img_out_list = val_data.next().data
    for i in range(num_image):
        img_in = nd.expand_dims(img_in_list[i], axis=0)
        plt.subplot(2, 4, i + 1)
        visual.visualize(img_in[0])
        img_out = netG(img_in.as_in_context(ctx))
        plt.subplot(2, 4, i + 5)
        visual.visualize(img_out[0])
    plt.show()
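
Every example on this page delegates drawing to a visualize helper. As a point of reference, here is a minimal sketch of what such a helper could look like for MXNet NDArray images, assuming CHW layout with pixel values in [-1, 1] (the mapping mirrors the plt.imshow call in Example #5; the actual helpers in these repositories may differ):

import matplotlib.pyplot as plt
import numpy as np

def visualize(img):
    # Assumed helper: map a CHW array in [-1, 1] to HWC uint8 and draw it
    # on the current matplotlib axes.
    arr = ((img.asnumpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
    plt.imshow(arr)
    plt.axis('off')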
def run_ant_colony_algorithm(tsp_data, method, file, columns):
    results = []
    for name, data_dict in tsp_data:
        if 'line' not in data_dict:
            print(data_dict['data'])
            data_dict['line'] = haversine.haversine_matrix(
                data_dict['data']).tolist()
        line = np.array(data_dict['line'])
        print(f'line.shape={line.shape}')
        t1 = datetime.datetime.now()
        ant_colony_algorithm = AntColonyAlgorithm(name,
                                                  line,
                                                  epochs=500,
                                                  method=method,
                                                  early_stopping_rounds=100)
        route_info = ant_colony_algorithm.travelling()
        print(route_info)
        cost = datetime.datetime.now() - t1
        best_dist = route_info['elite']['best_dist']
        results.append([
            name,
            int(line.shape[1]), best_dist, 'ant colony', method, cost,
            datetime.datetime.now()
        ])
        print(f'name: {name}, best_dist: {best_dist}, cost: {cost}')
        assert best_dist > 0
        if 'data' in data_dict or 'coords' in data_dict:
            if 'coords' in data_dict:
                coords = data_dict['coords']
            else:
                coords = data_dict['data']
            print(best_dist)
            print(np.array(coords)[route_info['elite']['best_route']].tolist())
        else:
            coords = None
        visual.visualize(route_info,
                         coords,
                         algorithm_name='AntColonyAlgorithm',
                         name=name,
                         method=method)

    if file is not None:
        results = pd.DataFrame(results, columns=columns)
        results['cost_time'] = results['cost_time'].apply(
            lambda x: x.value / 10**9)
        results.to_csv(file, index=False, header=False, mode='a')
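
A hedged sketch of how run_ant_colony_algorithm might be invoked. The column names mirror the fields appended to results above (including 'cost_time', which the function references by name), and tsp_data is assumed to be an iterable of (name, dict) pairs whose 'data' entry holds latitude/longitude pairs. All names here are illustrative, not the repository's documented API:

# Hypothetical invocation; column names match the row layout built above.
columns = ['name', 'n_cities', 'best_dist', 'algorithm', 'method',
           'cost_time', 'finished_at']
tsp_data = {
    'demo': {'data': [(52.52, 13.40), (48.14, 11.58), (50.11, 8.68)]},
}.items()
run_ant_colony_algorithm(tsp_data, method='elite', file='results.csv',
                         columns=columns)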
Example #3
    def phenotype(self):
        """ Genotype to phenotype mapping (gets a graph visualization) """
        assert self.initialized, "Genome should be initialized first!"
        assert len(self.input_nodes) > 0, "There are no input nodes!"
        assert len(self.output_nodes) > 0, "There are no output nodes!"
        assert len(self.connection_genes) > 0, "There are no connection genes!"

        return visualize(self.input_nodes, self.output_nodes,
                         self.connection_genes)
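
A short usage sketch for the mapping above; Genome and initialize() are assumptions inferred from the assertions in phenotype(), not a documented API:

genome = Genome()
genome.initialize()          # otherwise the first assertion fires
graph = genome.phenotype()   # forwards nodes and connections to visualize()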
Example #4
def run():
    # Initialize
    darwinian = Evolution(15, 6)

    # Evolve
    darwinian.evolve(150)

    # Get the best agent
    best_genome = darwinian.get_best_genome()

    # Visualize the performance
    for i_episode in range(10):
        observation = darwinian.env.reset()
        for t in range(500):
            darwinian.env.render()

            action = best_genome.action(observation)
            observation, reward, done, info = darwinian.env.step(action)
            if done:
                print("Episode finished after {} timesteps".format(t+1))
                break

    # Display the network architecture
    visualize(best_genome.get_inputs(),
              best_genome.get_outputs(), best_genome.get_connections())

    # Plot the statistics
    stats = darwinian.get_statistics()
    plt.plot(stats[0], stats[1]) # average
    plt.plot(stats[0], stats[2]) # best
    plt.ylabel("Reward")
    plt.xlabel("Generation")
    plt.legend(['average', 'best'], loc='upper left')
    plt.show()

    # Sleep a bit
    time.sleep(1)
    darwinian.env.close()
Example #5
def main(opt):
    ctx = mx.gpu() if opt.use_gpu else mx.cpu()
    testclasspaths = []
    testclasslabels = []
    print('loading test files')
    filename = '_testlist.txt'
    with open(opt.dataset + "_" + opt.expname + filename, 'r') as f:
        for line in f:
            testclasspaths.append(line.split(' ')[0])
            if int(line.split(' ')[1]) == -1:
                testclasslabels.append(0)
            else:
                testclasslabels.append(1)
    neworder = list(range(len(testclasslabels)))
    neworder = shuffle(neworder)

    c = list(zip(testclasslabels, testclasspaths))
    print('shuffling')
    random.shuffle(c)

    #testclasslabels, testclasspaths = zip(*c)
    #testclasslabels = testclasslabels[1:5000]
    #testclasspaths = testclasspaths[1:5000]
    ltnt = 512
    print('loading pictures')
    test_data = load_image.load_test_images(testclasspaths, testclasslabels,
                                            opt.batch_size, opt.img_wd,
                                            opt.img_ht, ctx, opt.noisevar)
    print('picture loading done')
    netEn, netDe, netD, netD2, netDS = set_network(opt.depth, ctx, 0, 0,
                                                   opt.ndf, opt.ngf,
                                                   opt.append)
    netEn.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                      '_En.params',
                      ctx=ctx)
    netDe.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                      '_De.params',
                      ctx=ctx)
    netD.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                     '_D.params',
                     ctx=ctx)
    netD2.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                      '_D2.params',
                      ctx=ctx)
    netDS.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                      '_SD.params',
                      ctx=ctx)
    print('Model loading done')
    lbllist = []
    scorelist1 = []
    scorelist2 = []
    scorelist3 = []
    scorelist4 = []
    test_data.reset()
    count = 0
    for batch in test_data:
        count += 1
        print(str(count))  #, end="\r")
        real_in = batch.data[0].as_in_context(ctx)
        real_out = batch.data[1].as_in_context(ctx)
        lbls = batch.label[0].as_in_context(ctx)
        code = netEn((real_out))
        code = code + nd.random.normal(
            loc=0, scale=0.002, shape=code.shape, ctx=ctx)
        outnn = (netDe(code))
        out_concat = nd.concat(real_out, outnn, dim=1) if opt.append else outnn
        output4 = nd.mean((netD(out_concat)), (1, 3, 2)).asnumpy()
        code = netEn(real_in)
        #code=codet+nd.random.normal(loc=0, scale=0.0000001, shape=code.shape,ctx=ctx)
        #code2=codet+nd.random.normal(loc=0, scale=0.000001, shape=code.shape,ctx=ctx)
        #eq_code = heq(code.asnumpy(),2)
        #code = nd.array(eq_code, ctx=ctx)
        out = netDe(code)
        #out2 = netDe(code2)
        out_concat = nd.concat(real_in, out, dim=1) if opt.append else out
        output = netD(out_concat)  #Denoised image
        output3 = nd.mean((out - real_out)**2,
                          (1, 3, 2)).asnumpy()  #denoised-real
        output = nd.mean(output, (1, 3, 2)).asnumpy()
        out_concat = nd.concat(real_out, real_out,
                               dim=1) if opt.append else real_out

        output2 = netDS(netDe(code))  #Image with no noise
        output2 = nd.mean(output2, (1, 3, 2)).asnumpy()
        lbllist = lbllist + list(lbls.asnumpy())
        scorelist1 = scorelist1 + list(output)
        scorelist2 = scorelist2 + list(output2)
        scorelist3 = scorelist3 + list(output3)
        scorelist4 = scorelist4 + list(output4)

        fake_img1 = nd.concat(real_in[0], real_out[0], out[0], outnn[0], dim=1)
        fake_img2 = nd.concat(real_in[1], real_out[1], out[1], outnn[1], dim=1)
        fake_img3 = nd.concat(real_in[2], real_out[2], out[2], outnn[2], dim=1)
        fake_img4 = nd.concat(real_in[3], real_out[3], out[3], outnn[3], dim=1)
        fake_img = nd.concat(fake_img1, fake_img2, fake_img3, fake_img4, dim=2)
        #print(np.shape(fake_img))
        visual.visualize(fake_img)
        plt.savefig('outputs/T_' + opt.expname + '_' + str(count) + '.png')
    if not opt.isvalidation:

        fpr, tpr, _ = roc_curve(lbllist, scorelist1, pos_label=1)
        roc_auc1 = auc(fpr, tpr)
        fpr, tpr, _ = roc_curve(lbllist, scorelist2, pos_label=1)
        roc_auc2 = auc(fpr, tpr)
        fpr, tpr, _ = roc_curve(lbllist, scorelist3, pos_label=1)
        roc_auc3 = auc(fpr, tpr)
        fpr, tpr, _ = roc_curve(lbllist, scorelist4, pos_label=1)
        roc_auc4 = auc(fpr, tpr)
        plt.gcf().clear()
        plt.clf()
        sns.set(color_codes=True)
        posscores = [
            scorelist3[i] for i, v in enumerate(lbllist) if int(v) == 1
        ]
        negscores = [
            scorelist3[i] for i, v in enumerate(lbllist) if int(v) == 0
        ]
        #sns.distplot(posscores, hist=False, label="Known Classes" ,rug=True)
        sns.kdeplot(posscores, label="Known Classes")
        sns.kdeplot(negscores, label="Unknown Classes")
        #plt.hold()
        #sns.distplot(negscores, hist=False, label = "Unknown Classes", rug=True);
        plt.legend()
        plt.savefig('outputs/matdist_' + opt.expname + '_.png')

        plt.gcf().clear()
        inputT = nd.zeros((ltnt, ltnt, 1, 1), ctx=ctx)
        for i in range(0, ltnt):
            inputT[i, i, :, :] = -1
        out = netDe(inputT)
        count = 0
        for i in range(int(math.ceil(math.sqrt(ltnt)))):
            for j in range(int(math.ceil(math.sqrt(ltnt)))):
                if count < ltnt:
                    plt.subplot(math.ceil(math.sqrt(ltnt)),
                                math.ceil(math.sqrt(ltnt)), count + 1)
                    plt.imshow(
                        ((out[count].asnumpy().transpose(1, 2, 0) + 1.0) *
                         127.5).astype(np.uint8))
                    plt.axis('off')
                count += 1
        plt.savefig('outputs/atoms_' + opt.expname + '_.png')
        plt.gcf().clear()
        plt.clf()
        aucs = [roc_auc1, roc_auc2, roc_auc3, roc_auc4]
    else:
        aucs = [0, 0, 0, 0]
    # Decode a batch of random latent codes and save the generated samples.
    fakecode = nd.random_normal(loc=0,
                                scale=1,
                                shape=(16, 4096, 1, 1),
                                ctx=ctx)
    out = netDe(fakecode)
    fake_img1 = nd.concat(out[0], out[1], out[2], out[3], dim=1)
    fake_img2 = nd.concat(out[7], out[6], out[5], out[4], dim=1)
    fake_img3 = nd.concat(out[8], out[9], out[10], out[11], dim=1)
    fake_img4 = nd.concat(out[15], out[14], out[13], out[12], dim=1)
    fake_img = nd.concat(fake_img1, fake_img2, fake_img3, fake_img4, dim=2)
    #print(np.shape(fake_img))
    visual.visualize(fake_img)
    plt.savefig('outputs/fakes_' + opt.expname + '_.png')
    return aucs
Example #6
def train(pool_size, epochs, train_data, val_data, ctx, netEn, netDe, netD, netD2, trainerEn, trainerDe, trainerD, trainerD2, lambda1, batch_size, expname, append=True, useAE=False):
    tp_file = open(expname + "_trainloss.txt", "w")  
    tp_file.close()  
    text_file = open(expname + "_validtest.txt", "w")
    text_file.close()
    #netGT, netDT, _, _ = set_test_network(opt.depth, ctx, opt.lr, opt.beta1,opt.ndf,  opt.ngf, opt.append)
    GAN_loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    L1_loss = gluon.loss.L2Loss()
    image_pool = imagePool.ImagePool(pool_size)
    metric = mx.metric.CustomMetric(facc)
    metric2 = mx.metric.CustomMetric(facc)
    metricMSE = mx.metric.MSE()
    loss_rec_G = []
    loss_rec_D = []
    loss_rec_R = []
    acc_rec = []
    acc2_rec = []
    loss_rec_D2 = []
    loss_rec_G2 = []
    lr = 0.002
    #mu = nd.random_normal(loc=0, scale=1, shape=(batch_size/2,64,1,1), ctx=ctx) 
    mu = nd.random.uniform(low=-1, high=1, shape=(batch_size // 2, 64, 1, 1), ctx=ctx)
    #mu =  nd.zeros((batch_size/2,64,1,1),ctx=ctx)
    sigma = nd.ones((64,1,1),ctx=ctx)
    mu.attach_grad()
    sigma.attach_grad()    
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    logging.basicConfig(level=logging.DEBUG)
    for epoch in range(epochs):

        tic = time.time()
        btic = time.time()
        train_data.reset()
        iter = 0
        #print('learning rate : '+str(trainerD.learning_rate ))
        for batch in train_data:
            ############################
            # (1) Update D network: maximize log(D(x, y)) + log(1 - D(x, G(x, z)))
            ###########################
            real_in = batch.data[0].as_in_context(ctx)
            real_out = batch.data[1].as_in_context(ctx)
            fake_latent= netEn(real_in)
            #real_latent = nd.random_normal(loc=0, scale=1, shape=fake_latent.shape, ctx=ctx)
            real_latent = nd.multiply(nd.power(sigma, 2), nd.random_normal(loc=0, scale=1, shape=fake_latent.shape, ctx=ctx))
            #nd.random.uniform(low=-1, high=1, shape=fake_latent.shape, ctx=ctx)
            fake_out = netDe(fake_latent)
            fake_concat = nd.concat(real_in, fake_out, dim=1) if append else fake_out
            with autograd.record():
                # Train with fake image
                # Use image pooling to utilize history imagesi
                output = netD(fake_concat)
                output2 = netD2(fake_latent)
                fake_label = nd.zeros(output.shape, ctx=ctx)
                fake_latent_label = nd.zeros(output2.shape, ctx=ctx)
                noiseshape = (fake_latent.shape[0] // 2, fake_latent.shape[1],
                              fake_latent.shape[2], fake_latent.shape[3])
                eps2 = nd.multiply(nd.power(sigma, 2), nd.random_normal(loc=0, scale=1, shape=fake_latent.shape, ctx=ctx))
                #eps2 = nd.random_normal(loc=0, scale=sigma.asscalar(), shape=fake_latent.shape, ctx=ctx)
                #eps = nd.random.uniform(low=-1, high=1, shape=noiseshape, ctx=ctx)
                rec_output = netD(netDe(eps2))
                errD_fake = GAN_loss(rec_output, fake_label)
                errD_fake2 = GAN_loss(output, fake_label)
                errD2_fake = GAN_loss(output2, fake_latent_label)
                metric.update([fake_label, ], [output, ])
                metric2.update([fake_latent_label, ], [output2, ])
                real_concat =  nd.concat(real_in, real_out, dim=1) if append else  real_out
                output = netD(real_concat)
                output2 = netD2(real_latent)
                real_label = nd.ones(output.shape, ctx=ctx)
                real_latent_label =  nd.ones(output2.shape, ctx=ctx)
                errD_real = GAN_loss(output, real_label)
                errD2_real =  GAN_loss(output2, real_latent_label)
                #errD = (errD_real + 0.5*(errD_fake+errD_fake2)) * 0.5
                errD = (errD_real + errD_fake) * 0.5
                errD2 = (errD2_real + errD2_fake) * 0.5
                totalerrD = errD + errD2
                totalerrD.backward()
                #errD2.backward()
                metric.update([real_label, ], [output, ])
                metric2.update([real_latent_label, ], [output2, ])
            trainerD.step(batch.data[0].shape[0])
            trainerD2.step(batch.data[0].shape[0])
            ############################
            # (2) Update G network: maximize log(D(x, G(x, z))) - lambda1 * L1(y, G(x, z))
            ###########################
            with autograd.record():
                sh = fake_latent.shape
                eps2 = nd.multiply(nd.power(sigma, 2), nd.random_normal(loc=0, scale=1, shape=fake_latent.shape, ctx=ctx))
                #eps2 = nd.random_normal(loc=0, scale=sigma.asscalar(), shape=fake_latent.shape, ctx=ctx)
                #eps = nd.random.uniform(low=-1, high=1, shape=noiseshape, ctx=ctx)
                rec_output = netD(netDe(eps2))
                fake_latent= (netEn(real_in))
                output2 = netD2(fake_latent)
                fake_out = netDe(fake_latent)
                fake_concat =  nd.concat(real_in, fake_out, dim=1) if append else  fake_out
                output = netD(fake_concat)
                real_label = nd.ones(output.shape, ctx=ctx)
                real_latent_label = nd.ones(output2.shape, ctx=ctx)
                errG2 = GAN_loss(rec_output, real_label)
                errR = L1_loss(real_out, fake_out) * lambda1
                errG = 10.0 * GAN_loss(output2, real_latent_label) + errG2 + errR + nd.mean(nd.power(sigma, 2))
                errG.backward()
            if epoch > 50:
                sigma -= lr / sigma.shape[0] * sigma.grad
                print(sigma)
            trainerDe.step(batch.data[0].shape[0])
            trainerEn.step(batch.data[0].shape[0])
            loss_rec_G2.append(nd.mean(errG2).asscalar())
            loss_rec_G.append(nd.mean(nd.mean(errG)).asscalar()-nd.mean(errG2).asscalar()-nd.mean(errR).asscalar())
            loss_rec_D.append(nd.mean(errD).asscalar())
            loss_rec_R.append(nd.mean(errR).asscalar())
            loss_rec_D2.append(nd.mean(errD2).asscalar())
            _, acc2 = metric2.get()
            name, acc = metric.get()
            acc_rec.append(acc)
            acc2_rec.append(acc2)

            # Print log information every ten batches
            if iter % 10 == 0:
                _, acc2 = metric2.get()
                name, acc = metric.get()
                logging.info('speed: {} samples/s'.format(batch_size / (time.time() - btic)))
                #print(errD)
                logging.info('discriminator loss = %f, D2 loss = %f, generator loss = %f, G2 loss = %f, binary training acc = %f, D2 acc = %f, reconstruction error = %f at iter %d epoch %d'
                             % (nd.mean(errD).asscalar(), nd.mean(errD2).asscalar(),
                                nd.mean(errG - errG2 - errR).asscalar(), nd.mean(errG2).asscalar(), acc, acc2, nd.mean(errR).asscalar(), iter, epoch))
            iter = iter + 1
        btic = time.time()
        name, acc = metric.get()
        _, acc2 = metric2.get()
        tp_file = open(expname + "_trainloss.txt", "a")
        tp_file.write(str(nd.mean(errG2).asscalar()) + " " + str(
            nd.mean(nd.mean(errG)).asscalar() - nd.mean(errG2).asscalar() - nd.mean(errR).asscalar()) + " " + str(
            nd.mean(errD).asscalar()) + " " + str(nd.mean(errD2).asscalar()) + " " + str(nd.mean(errR).asscalar()) +" "+str(acc) + " " + str(acc2)+"\n")
        tp_file.close()
        metric.reset()
        metric2.reset()
        train_data.reset()

        logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
        logging.info('time: %f' % (time.time() - tic))
        if epoch % 10 == 0:  # and epoch>0:
            text_file = open(expname + "_validtest.txt", "a")
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_D.params"
            netD.save_params(filename)
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_D2.params"
            netD2.save_params(filename)
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_En.params"
            netEn.save_params(filename)
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_De.params"
            netDe.save_params(filename)
            fake_img1 = nd.concat(real_in[0],real_out[0], fake_out[0], dim=1)
            fake_img2 = nd.concat(real_in[1],real_out[1], fake_out[1], dim=1)
            fake_img3 = nd.concat(real_in[2],real_out[2], fake_out[2], dim=1)
            fake_img4 = nd.concat(real_in[3],real_out[3], fake_out[3], dim=1)
            val_data.reset()
            text_file = open(expname + "_validtest.txt", "a")
            for vbatch in val_data:
                
                real_in = vbatch.data[0].as_in_context(ctx)
                real_out = vbatch.data[1].as_in_context(ctx)
                fake_latent= netEn(real_in)
                y = netDe(fake_latent)
                fake_out = y
                metricMSE.update([fake_out, ], [real_out, ])
            _, acc2 = metricMSE.get()
            text_file.write("%s %s %s\n" % (str(epoch), nd.mean(errR).asscalar(), str(acc2)))
            metricMSE.reset()
            images = netDe(eps2)
            fake_img1T = nd.concat(images[0], images[1], images[2], dim=1)
            fake_img2T = nd.concat(images[3], images[4], images[5], dim=1)
            fake_img3T = nd.concat(images[6], images[7], images[8], dim=1)
            fake_img = nd.concat(fake_img1T, fake_img2T, fake_img3T, dim=2)
            visual.visualize(fake_img)
            plt.savefig('outputs/'+expname+'_fakes_'+str(epoch)+'.png')
            text_file.close()

            # Do 10 iterations of sampler update
            fake_img1T = nd.concat(real_in[0], real_out[0], fake_out[0], dim=1)
            fake_img2T = nd.concat(real_in[1], real_out[1], fake_out[1], dim=1)
            fake_img3T = nd.concat(real_in[2], real_out[2], fake_out[2], dim=1)
            #fake_img4T = nd.concat(real_in[3], real_out[3], fake_out[3], dim=1)
            fake_img = nd.concat(fake_img1, fake_img2, fake_img3, fake_img1T, fake_img2T, fake_img3T, dim=2)
            visual.visualize(fake_img)
            plt.savefig('outputs/' + expname + '_' + str(epoch) + '.png')
            '''if epoch > 100:
                for ep2 in range(10):
                    with autograd.record():
                        #eps = nd.random_normal(loc=0, scale=1, shape=noiseshape, ctx=ctx)
                        eps = nd.random.uniform(low=-1, high=1, shape=noiseshape, ctx=ctx)
                        eps2 = nd.random_normal(loc=0, scale=0.02, shape=noiseshape, ctx=ctx)
                        eps2 = nd.tanh(eps2 * sigma + mu)
                        eps2 = nd.concat(eps, eps2, dim=0)
                        rec_output = netD(netDe(eps2))
                        fake_label = nd.zeros(rec_output.shape, ctx=ctx)
                        errGS = GAN_loss(rec_output, fake_label)
                        errGS.backward()
                    mu -= lr / mu.shape[0] * mu.grad
                    sigma -= lr / sigma.shape[0] * sigma.grad
                    print('mu ' + str(mu[0, 0, 0, 0].asnumpy()) + '  sigma ' + str(sigma[0, 0, 0, 0].asnumpy()))
            '''
            images = netDe(eps2)
            fake_img1T = nd.concat(images[0], images[1], images[2], dim=1)
            fake_img2T = nd.concat(images[3], images[4], images[5], dim=1)
            fake_img3T = nd.concat(images[6], images[7], images[8], dim=1)
            fake_img = nd.concat(fake_img1T, fake_img2T, fake_img3T, dim=2)
            visual.visualize(fake_img)
            plt.savefig('outputs/' + expname + '_fakespost_' + str(epoch) + '.png')
    return [loss_rec_D, loss_rec_G, loss_rec_R, acc_rec, loss_rec_D2, loss_rec_G2, acc2_rec]
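
The discriminator update in Examples #6, #9, #11, and #12 follows the standard two-sided GAN objective described in the "(1) Update D network" comments. Condensed into one helper, a sketch of that computation, assuming netD maps a batch to raw logits (the helper itself is illustrative, not part of these repositories):

from mxnet import gluon, nd

GAN_loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()

def discriminator_loss(netD, real_batch, fake_batch):
    # Real samples are pushed toward label 1, generated samples toward 0;
    # the 0.5 factor matches the averaging used in the training loops above.
    out_real = netD(real_batch)
    out_fake = netD(fake_batch)
    errD_real = GAN_loss(out_real, nd.ones_like(out_real))
    errD_fake = GAN_loss(out_fake, nd.zeros_like(out_fake))
    return (errD_real + errD_fake) * 0.5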
Example #7
folders = range(0, 10)
for classname in [0]:  #folders:

    #ctx = mx.gpu() if opt.use_gpu else mx.cpu()

    netEn, netDe, netD, netD2 = vaetest.set_network(opt.depth, ctx, 0, 0, opt.ndf, opt.ngf, opt.append)
    netEn.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) + '_En.params', ctx=ctx)
    netDe.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) + '_De.params', ctx=ctx)
    netD.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) + '_D.params', ctx=ctx)
    netD2.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) + '_D2.params', ctx=ctx)
    print(ctx)
    test_data = test_data.next()
    print(np.shape(test_data.data))
    imc1 = netEn(test_data.data[0][0].expand_dims(1).as_in_context(ctx))
    im1 = netDe(imc1)
    imc2 = netEn(test_data.data[0][1].expand_dims(1).as_in_context(ctx))
    im2 = netDe(imc2)
    print(np.shape(im1))
    fakecode = nd.random_normal(loc=0, scale=1, shape=(16, 32, 1, 1), ctx=ctx)
    out = netDe(fakecode)
    fake_img1 = nd.concat(im1, out[1], out[2], out[3], dim=1)
    fake_img2 = nd.concat(out[7], out[6], out[5], out[4], dim=1)
    fake_img3 = nd.concat(out[8], out[9], out[10], out[11], dim=1)
    fake_img4 = nd.concat(out[15], out[14], out[13], im2, dim=1)
    fake_img = nd.concat(fake_img1, fake_img2, fake_img3, fake_img4, dim=2)
    #print(np.shape(fake_img))
    visual.visualize(fake_img)
    plt.savefig('outputs/fakes_' + opt.expname + '_.png')
Example #8
        if n_batch == TOTAL_BATCHES // 2 - 1 or n_batch == TOTAL_BATCHES - 1:  # to make logs shorter
            if verbose:
                print('Error in training batch %d/%d/%d/%d' %
                      (n_batch + 1, TOTAL_BATCHES, i, EPOCHS))
        GW = (GW *
              (n_batch - 1) + gW) / n_batch  # Accumulate average gradient of W
        GB = (GB *
              (n_batch - 1) + gB) / n_batch  # Accumulate average gradient of B
        if err < Err:
            Err = err  # Accumulate minimum mini-batch error
    if verbose:
        print('Min error in epoch %d/%d is %f' % (i, EPOCHS, Err))
    VW = VW * zeta - alpha * GW
    VB = VB * zeta - alpha * GB
    L.W.set_value(L.W.get_value() + VW)
    L.B.set_value(L.B.get_value() + VB)

    Q = abs(array(test(Ts[0])) - Ts[1])
    if verbose:
        print(m_test_sample - count_nonzero(Q))
    errAcc.append(Err)

plot(errAcc)
vis = visualize(L.W.get_value().transpose())
imwrite('weights' + '.png', vis)
savefig('plot_' + str(EPOCHS) + '_' + str(TOTAL_BATCHES) + '_' + str(alpha) +
        '_' + str(zeta) + '_' + str(lam) + 'best' + '.png')
import pickle

fl = open('LR_weights', 'wb')
pickle.dump([L.W.get_value(), L.B.get_value()], fl)
fl.close()
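
Example #8 (and Example #10 below) hand-rolls minibatch gradient averaging followed by a classical momentum step, V = zeta*V - alpha*G, before adding V to the weights. A self-contained NumPy sketch of just that update rule (names chosen to match the snippets; this is not the original training loop):

import numpy as np

def momentum_step(W, V, G, alpha=0.01, zeta=0.9):
    # V keeps a decaying history of gradients: zeta is the momentum
    # coefficient, alpha the learning rate.
    V = zeta * V - alpha * G
    return W + V, V

W, V = np.zeros(10), np.zeros(10)
G = np.random.randn(10)  # stand-in for an averaged minibatch gradient
W, V = momentum_step(W, V, G)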
Example #9
def train(pool_size, epochs, train_data, ctx, netEn, netDe, netD, trainerEn, trainerDe, trainerD, lambda1, batch_size, expname):

    threewayloss = gluon.loss.SoftmaxCrossEntropyLoss()
    GAN_loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    L1_loss = gluon.loss.L1Loss()
    image_pool = imagePool.ImagePool(pool_size)
    metric = mx.metric.CustomMetric(facc)

    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    logging.basicConfig(level=logging.DEBUG)

    for epoch in range(epochs):
        tic = time.time()
        btic = time.time()
        train_data.reset()
        iter = 0
        for batch in train_data:
            ############################
            # (1) Update D network: maximize log(D(x, y)) + log(1 - D(x, G(x, z)))
            ###########################
            real_in = batch.data[0].as_in_context(ctx)
            real_out = batch.data[1].as_in_context(ctx)
            tempout = netEn(real_in)
            fake_out = netDe(tempout)
            fake_concat = fake_out
            #fake_concat = image_pool.query(fake_out)
            #fake_concat = image_pool.query(nd.concat(real_in, fake_out, dim=1))
            with autograd.record():
                # Train with fake image
                # Use image pooling to utilize history images
                output = netD(fake_concat)
                fake_label = nd.zeros(output.shape[0], ctx=ctx)
                errD_fake = threewayloss(output, fake_label)
                metric.update([fake_label, ], [output, ])

                

                # Train with real image
                real_concat = real_out
                output = netD(real_concat)
                real_label = nd.ones(output.shape[0], ctx=ctx)
                errD_real = threewayloss(output, real_label)
                metric.update([real_label, ], [output, ])


                #train with abnormal image
                abinput = nd.random.uniform(-1, 1, tempout.shape, ctx=ctx)
                aboutput = netD(netDe(abinput))
                #print(aboutput.shape)
                #print(output.shape)
                ab_label = 2 * nd.ones(aboutput.shape[0], ctx=ctx)
                errD_ab = threewayloss(aboutput, ab_label)
                errD = (errD_real + errD_fake + errD_ab) * 0.33
                errD.backward()
                

            trainerD.step(batch.data[0].shape[0])

            ############################
            # (2) Update G network: maximize log(D(x, G(x, z))) - lambda1 * L1(y, G(x, z))
            ###########################
            with autograd.record():
                fake_out = netDe(netEn(real_in))
                fake_concat = fake_out
                output = netD(fake_concat)
                real_label = nd.ones(output.shape[0], ctx=ctx)
                errG = threewayloss(output, real_label) + L1_loss(real_out, fake_out) * lambda1
                errR = L1_loss(real_out, fake_out)
                errG.backward()

            trainerEn.step(batch.data[0].shape[0])
            trainerDe.step(batch.data[0].shape[0])

            # Print log information every ten batches
            if iter % 10 == 0:
                name, acc = metric.get()
                logging.info('speed: {} samples/s'.format(batch_size / (time.time() - btic)))
                logging.info(
                    'discriminator loss = %f, generator loss = %f, latent error = %f,  binary training acc = %f, reconstruction error= %f at iter %d epoch %d'
                    % (nd.mean(errD).asscalar(),
                       nd.mean(errG).asscalar(), nd.mean(errD_ab).asscalar()   , acc,nd.mean(errR).asscalar() ,iter, epoch))
            iter = iter + 1
            btic = time.time()

        name, acc = metric.get()
        metric.reset()
        logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
        logging.info('time: %f' % (time.time() - tic))
        if epoch % 10 == 0:
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_D.params"
            netD.save_params(filename)
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_En.params"
            netEn.save_params(filename)
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_De.params"
            netDe.save_params(filename)
            # Visualize one generated image for each epoch
            fake_img = nd.concat(real_in[0],real_out[0], fake_out[0], dim=1)
            visual.visualize(fake_img)
            plt.savefig('outputs/'+expname+'_'+str(epoch)+'.png')
Example #10
    Err = 0
    for x in range(0, m_sample - MINI_BATCH, MINI_BATCH):
        err, gW, gB1, gB2 = train(Tr[0][x:x + MINI_BATCH, :])
        n_batch += 1
        GW = (GW * (n_batch - 1) + gW) / n_batch  # Accumulate average gradient of W
        GB1 = (GB1 * (n_batch - 1) + gB1) / n_batch  # Accumulate average gradient of B1
        GB2 = (GB2 * (n_batch - 1) + gB2) / n_batch  # Accumulate average gradient of B2
        Err = (Err * (n_batch - 1) + err) / n_batch

    print('Avg error in epoch %d/%d is %f' % (i, EPOCHS, Err))
    errAcc.append(Err)
    VW = VW * zeta - alpha * GW
    VB1 = VB1 * zeta - alpha * GB1
    VB2 = VB2 * zeta - alpha * GB2
    CA.W.set_value(CA.W.get_value() + VW)
    CA.B1.set_value(CA.B1.get_value() + VB1)
    CA.B2.set_value(CA.B2.get_value() + VB2)

    if i % 100 == 0 or i == EPOCHS - 1 or i == EPOCHS - 2:
        fl = open('ca_weights', 'wb')
        pickle.dump([CA.W.get_value(), CA.B1.get_value(), CA.B2.get_value()], fl)
        fl.close()
        print('Weights written to file')
        del fl
        hEnc = reconstruct(Ts[0][:20, :])
        line = visualize(hEnc, (28, 28))
        imwrite('Epoch' + str(i) + '.png', line)
        alpha = alpha * 0.95
        plot(errAcc)
        savefig('errplot_cAE' + str(i) + '.png')
Example #11
def train(pool_size, epochs, train_data, val_data, ctx, netEn, netDe, netD, trainerEn, trainerDe, trainerD, lambda1, batch_size, expname, append=True, useAE=False):
    
    text_file = open(expname + "_validtest.txt", "w")
    text_file.close()
    #netGT, netDT, _, _ = set_test_network(opt.depth, ctx, opt.lr, opt.beta1,opt.ndf,  opt.ngf, opt.append)
    GAN_loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    L1_loss = gluon.loss.L2Loss()
    image_pool = imagePool.ImagePool(pool_size)
    metric = mx.metric.CustomMetric(facc)
    metric2 = mx.metric.MSE()
    loss_rec_G = []
    loss_rec_D = []
    loss_rec_R = []
    acc_rec = []
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    logging.basicConfig(level=logging.DEBUG)
    for epoch in range(epochs):

        tic = time.time()
        btic = time.time()
        train_data.reset()
        iter = 0
        #print('learning rate : '+str(trainerD.learning_rate ))
        for batch in train_data:
            ############################
            # (1) Update D network: maximize log(D(x, y)) + log(1 - D(x, G(x, z)))
            ###########################
            real_in = batch.data[0].as_in_context(ctx)
            real_out = batch.data[1].as_in_context(ctx)
            soft_zero = 1e-10
            fake_latent = netEn(real_in)
            fake_latent = np.squeeze(fake_latent)
            mu_lv = nd.split(fake_latent, axis=1, num_outputs=2)
            mu = mu_lv[0]
            lv = mu_lv[1]
            KL = 0.5 * nd.nansum(1 + lv - mu * mu - nd.exp(lv + soft_zero))
            eps = nd.random_normal(loc=0, scale=1, shape=(batch_size, 2048), ctx=ctx)
            z = mu + nd.exp(0.5 * lv) * eps
            z = nd.expand_dims(nd.expand_dims(z, 2), 2)
            y = netDe(z)
            fake_out = y

            logloss = nd.nansum(real_in * nd.log(y + soft_zero) + (1 - real_in) * nd.log(1 - y + soft_zero))
            loss = -logloss - KL
            fake_concat = nd.concat(real_in, fake_out, dim=1) if append else fake_out
            with autograd.record():
                # Train with fake image
                # Use image pooling to utilize history imagesi
                output = netD(fake_concat)
                fake_label = nd.zeros(output.shape, ctx=ctx)
                errD_fake = GAN_loss(output, fake_label)
                metric.update([fake_label, ], [output, ])
                real_concat =  nd.concat(real_in, real_out, dim=1) if append else  real_out
                output = netD(real_concat)
                real_label = nd.ones(output.shape, ctx=ctx)
                errD_real = GAN_loss(output, real_label)
                errD = (errD_real + errD_fake) * 0.5
                errD.backward()
                metric.update([real_label, ], [output, ])

            trainerD.step(batch.data[0].shape[0])

            ############################
            # (2) Update G network: maximize log(D(x, G(x, z))) - lambda1 * L1(y, G(x, z))
            ###########################
            with autograd.record():

                fake_latent = np.squeeze(netEn(real_in))
                mu_lv = nd.split(fake_latent, axis=1, num_outputs=2)
                mu = mu_lv[0]
                lv = mu_lv[1]
                KL = 0.5 * nd.nansum(1 + lv - mu * mu - nd.exp(lv + soft_zero))
                eps = nd.random_normal(loc=0, scale=1, shape=(batch_size, 2048), ctx=ctx)
                #KL = 0.5*nd.nansum(1+lv-mu*mu-nd.exp(lv+soft_zero))
                z = mu + nd.exp(0.5 * lv) * eps
                z = nd.expand_dims(nd.expand_dims(z, 2), 2)
                y = netDe(z)
                fake_out = y
                logloss = nd.nansum((real_in + 1) * 0.5 * nd.log(0.5 * (y + 1) + soft_zero) + (1 - 0.5 * (real_in + 1)) * nd.log(1 - 0.5 * (y + 1) + soft_zero))
                loss = -logloss - KL
                fake_concat = nd.concat(real_in, fake_out, dim=1) if append else fake_out
                output = netD(fake_concat)
                real_label = nd.ones(output.shape, ctx=ctx)
                errG = GAN_loss(output, real_label) + loss * lambda1  #L1_loss(real_out, fake_out) * lambda1
                errR = logloss  #L1_loss(real_out, fake_out)
                errG.backward()
            trainerDe.step(batch.data[0].shape[0])
            trainerEn.step(batch.data[0].shape[0])
            loss_rec_G.append(nd.mean(errG).asscalar() - nd.mean(errR).asscalar() * lambda1)
            loss_rec_D.append(nd.mean(errD).asscalar())
            loss_rec_R.append(nd.mean(errR).asscalar())
            name, acc = metric.get()
            acc_rec.append(acc)
            # Print log information every ten batches
            if iter % 10 == 0:
                name, acc = metric.get()
                logging.info('speed: {} samples/s'.format(batch_size / (time.time() - btic)))
                #print(errD)
                logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f, reconstruction error = %f at iter %d epoch %d'
                             % (nd.mean(errD).asscalar(),
                                nd.mean(errG).asscalar(), acc, nd.mean(errR).asscalar(), iter, epoch))
            iter = iter + 1
            btic = time.time()

        name, acc = metric.get()
        metric.reset()
        train_data.reset()

        logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
        logging.info('time: %f' % (time.time() - tic))
        if epoch % 10 == 0:
            text_file = open(expname + "_validtest.txt", "a")
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_D.params"
            netD.save_params(filename)
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_En.params"
            netEn.save_params(filename)
            filename = "checkpoints/"+expname+"_"+str(epoch)+"_De.params"
            netDe.save_params(filename)
            fake_img1 = nd.concat(real_in[0],real_out[0], fake_out[0], dim=1)
            fake_img2 = nd.concat(real_in[1],real_out[1], fake_out[1], dim=1)
            fake_img3 = nd.concat(real_in[2],real_out[2], fake_out[2], dim=1)
            fake_img4 = nd.concat(real_in[3],real_out[3], fake_out[3], dim=1)
            val_data.reset()
            text_file = open(expname + "_validtest.txt", "a")
            for vbatch in val_data:

                real_in = vbatch.data[0].as_in_context(ctx)
                real_out = vbatch.data[1].as_in_context(ctx)

                fake_latent = netEn(real_in)
                mu_lv = nd.split(fake_latent, axis=1, num_outputs=2)
                mu = mu_lv[0]
                lv = mu_lv[1]
                eps = nd.random_normal(loc=0, scale=1, shape=(batch_size // 5, 2048, 1, 1), ctx=ctx)
                z = mu + nd.exp(0.5 * lv) * eps
                y = netDe(z)
                fake_out = y
                KL = 0.5 * nd.sum(1 + lv - mu * mu - nd.exp(lv), axis=1)
                logloss = nd.sum(real_in * nd.log(y + soft_zero) + (1 - real_in) * nd.log(1 - y + soft_zero), axis=1)
                loss = logloss + KL
                metric2.update([fake_out, ], [real_out, ])
                _, acc2 = metric2.get()
            text_file.write("%s %s %s\n" % (str(epoch), nd.mean(errR).asscalar(), str(acc2)))
            metric2.reset()

            fake_img1T = nd.concat(real_in[0],real_out[0], fake_out[0], dim=1)
            fake_img2T = nd.concat(real_in[1],real_out[1], fake_out[1], dim=1)
            fake_img3T = nd.concat(real_in[2],real_out[2], fake_out[2], dim=1)
            #fake_img4T = nd.concat(real_in[3],real_out[3], fake_out[3], dim=1)
            fake_img = nd.concat(fake_img1,fake_img2, fake_img3,fake_img1T,fake_img2T, fake_img3T,dim=2)
            visual.visualize(fake_img)
            plt.savefig('outputs/'+expname+'_'+str(epoch)+'.png')
            text_file.close()
    return [loss_rec_D, loss_rec_G, loss_rec_R, acc_rec]
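
Example #11 wraps a VAE inside the GAN loop: the encoder output is split into a mean mu and log-variance lv, a latent is drawn with the reparameterization trick z = mu + exp(0.5*lv)*eps with eps ~ N(0, 1), and the term 0.5*sum(1 + lv - mu^2 - exp(lv)) (the negative KL divergence) regularizes the posterior. The same step in isolation, with illustrative shapes:

from mxnet import nd

enc_out = nd.random_normal(shape=(4, 4096))        # stand-in encoder output
mu, lv = nd.split(enc_out, axis=1, num_outputs=2)  # mean and log-variance halves
eps = nd.random_normal(loc=0, scale=1, shape=mu.shape)
z = mu + nd.exp(0.5 * lv) * eps                    # reparameterization trick
negKL = 0.5 * nd.nansum(1 + lv - mu * mu - nd.exp(lv))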
Example #12
def train(cep,
          pool_size,
          epochs,
          train_data,
          val_data,
          ctx,
          netEn,
          netDe,
          netD,
          netD2,
          netDS,
          trainerEn,
          trainerDe,
          trainerD,
          trainerD2,
          trainerSD,
          lambda1,
          batch_size,
          expname,
          append=True,
          useAE=False):
    tp_file = open(expname + "_trainloss.txt", "w")
    tp_file.close()
    text_file = open(expname + "_validtest.txt", "w")
    text_file.close()
    #netGT, netDT, _, _ = set_test_network(opt.depth, ctx, opt.lr, opt.beta1,opt.ndf,  opt.ngf, opt.append)
    GAN_loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    L1_loss = gluon.loss.L2Loss()
    image_pool = imagePool.ImagePool(pool_size)
    metric = mx.metric.CustomMetric(facc)
    metric2 = mx.metric.CustomMetric(facc)
    metricStrong = mx.metric.CustomMetric(facc)
    metricMSE = mx.metric.MSE()
    loss_rec_G = []
    loss_rec_D = []
    loss_rec_R = []
    acc_rec = []
    acc2_rec = []
    loss_rec_D2 = []
    loss_rec_G2 = []
    lr = 2.0 * 512
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    logging.basicConfig(level=logging.DEBUG)
    if cep == -1:
        cep = 0
    else:
        netEn.load_params('checkpoints/' + opt.expname + '_' + str(cep) +
                          '_En.params',
                          ctx=ctx)
        netDe.load_params('checkpoints/' + opt.expname + '_' + str(cep) +
                          '_De.params',
                          ctx=ctx)
        netD.load_params('checkpoints/' + opt.expname + '_' + str(cep) +
                         '_D.params',
                         ctx=ctx)
        netD2.load_params('checkpoints/' + opt.expname + '_' + str(cep) +
                          '_D2.params',
                          ctx=ctx)
        netDS.load_params('checkpoints/' + opt.expname + '_' + str(cep) +
                          '_SD.params',
                          ctx=ctx)
    iter = 0
    for epoch in range(cep + 1, epochs):

        tic = time.time()
        btic = time.time()
        train_data.reset()
        #print('learning rate : '+str(trainerD.learning_rate ))
        for batch in train_data:
            ############################
            # (1) Update D network: maximize log(D(x, y)) + log(1 - D(x, G(x, z)))
            ###########################
            if ctx == mx.cpu():
                ct = mx.cpu()
            else:
                ct = mx.gpu()
            real_in = batch.data[0]  #.as_in_context(ctx)
            real_out = batch.data[1]  #.as_in_context(ctx)
            if iter == 0:
                latent_shape = (batch_size, 512, 1, 1)  #code.shape
                out_l_shape = (batch_size, 1, 1, 1)  #netD2((code)).shape
                out_i_shape = (batch_size, 1, 1, 1)  #netD(netDe(code)).shape
                out_s_shape = (batch_size, 1, 1, 1)  #netSD(netDe(code)).shape
            real_in = gluon.utils.split_and_load(real_in, ctx)
            real_out = gluon.utils.split_and_load(real_out, ctx)
            fake_latent = [netEn(r) for r in real_in]
            real_latent = nd.random.uniform(low=-1, high=1, shape=latent_shape)
            real_latent = gluon.utils.split_and_load(real_latent, ctx)
            fake_out = [netDe(f) for f in fake_latent]
            fake_concat = [nd.concat(r, f, dim=1)
                           for r, f in zip(real_in, fake_out)] if append else fake_out
            eps2 = nd.random.uniform(low=-1,
                                     high=1,
                                     shape=latent_shape,
                                     ctx=ct)
            eps2 = gluon.utils.split_and_load(eps2, ctx)
            if epoch > 150:  # (1/float(batch_size))*512*150:# and epoch%10==0:
                print('Mining..')
                mu = nd.random.uniform(low=-1,
                                       high=1,
                                       shape=latent_shape,
                                       ctx=ct)
                #isigma = nd.ones((batch_size,64,1,1),ctx=ctx)*0.000001
                mu.attach_grad()
                #sigma.attach_grad()
                images = netDe(mu)
                fake_img1T = nd.concat(images[0], images[1], images[2], dim=1)
                fake_img2T = nd.concat(images[3], images[4], images[5], dim=1)
                fake_img3T = nd.concat(images[6], images[7], images[8], dim=1)
                fake_img = nd.concat(fake_img1T, fake_img2T, fake_img3T, dim=2)
                visual.visualize(fake_img)
                plt.savefig('outputs/' + expname + '_fakespre_' + str(epoch) +
                            '.png')
                eps2 = gluon.utils.split_and_load(mu, ctx)
                for e in eps2:
                    e.attach_grad()
                for ep2 in range(1):
                    with autograd.record():
                        #eps = nd.random_normal(loc=0, scale=1, shape=fake_latent.shape, ctx=ctx) #
                        #eps2 = gluon.utils.split_and_load(nd.tanh(mu),ctx) #+nd.multiply(eps,sigma))#nd.random.uniform( low=-1, high=1, shape=fake_latent.shape, ctx=ctx)
                        rec_output = [netDS(netDe(e)) for e in eps2]
                        fake_label = gluon.utils.split_and_load(
                            nd.zeros(out_s_shape), ctx)
                        errGS = [
                            GAN_loss(r, f)
                            for r, f in zip(rec_output, fake_label)
                        ]
                        for e in errGS:
                            e.backward()
                    for idx, _ in enumerate(eps2):
                        eps2[idx] = nd.tanh(eps2[idx] -
                                            lr / eps2[idx].shape[0] *
                                            eps2[idx].grad)
                images = netDe((eps2[0]))
                fake_img1T = nd.concat(images[0], images[1], images[2], dim=1)
                fake_img2T = nd.concat(images[3], images[4], images[5], dim=1)
                fake_img3T = nd.concat(images[6], images[7], images[8], dim=1)
                fake_img = nd.concat(fake_img1T, fake_img2T, fake_img3T, dim=2)
                visual.visualize(fake_img)
                plt.savefig('outputs/' + expname + str(ep2) + '_fakespost_' +
                            str(epoch) + '.png')
                #eps2 = nd.tanh(mu)#+nd.multiply(eps,sigma))#nd.random.uniform( low=-1, high=1, shape=fake_latent.shape, ctx=ctx)

            with autograd.record():
                #eps2 = gluon.utils.split_and_load(eps2,ctx)
                # Train with fake image
                # Use image pooling to utilize history imagesi
                output = [netD(f) for f in fake_concat]
                output2 = [netD2(f) for f in fake_latent]
                fake_label = nd.zeros(out_i_shape)
                fake_label = gluon.utils.split_and_load(fake_label, ctx)
                fake_latent_label = nd.zeros(out_l_shape)
                fake_latent_label = gluon.utils.split_and_load(
                    fake_latent_label, ctx)
                eps = gluon.utils.split_and_load(
                    nd.random.uniform(low=-1, high=1, shape=latent_shape), ctx)
                rec_output = [netD(netDe(e)) for e in eps]
                errD_fake = [
                    GAN_loss(r, f) for r, f in zip(rec_output, fake_label)
                ]
                errD_fake2 = [
                    GAN_loss(o, f) for o, f in zip(output, fake_label)
                ]
                errD2_fake = [
                    GAN_loss(o, f) for o, f in zip(output2, fake_latent_label)
                ]
                for f, o in zip(fake_label, rec_output):
                    metric.update([
                        f,
                    ], [
                        o,
                    ])
                for f, o in zip(fake_latent_label, output2):
                    metric2.update([
                        f,
                    ], [
                        o,
                    ])
                real_concat = [nd.concat(r, o, dim=1)
                               for r, o in zip(real_in, real_out)] if append else real_out
                output = [netD(r) for r in real_concat]
                output2 = [netD2(r) for r in real_latent]
                real_label = gluon.utils.split_and_load(
                    nd.ones(out_i_shape), ctx)
                real_latent_label = gluon.utils.split_and_load(
                    nd.ones(out_l_shape), ctx)
                errD_real = [
                    GAN_loss(o, r) for o, r in zip(output, real_label)
                ]
                errD2_real = [
                    GAN_loss(o, r) for o, r in zip(output2, real_latent_label)
                ]
                for e1, e2, e4, e5 in zip(errD_real, errD_fake, errD2_real,
                                          errD2_fake):
                    err = (e1 + e2) * 0.5 + (e5 + e4) * 0.5
                    err.backward()
                for f, o in zip(real_label, output):
                    metric.update([
                        f,
                    ], [
                        o,
                    ])
                for f, o in zip(real_latent_label, output2):
                    metric2.update([
                        f,
                    ], [
                        o,
                    ])
            trainerD.step(batch.data[0].shape[0])
            trainerD2.step(batch.data[0].shape[0])
            nd.waitall()
            with autograd.record():
                strong_output = [netDS(netDe(e)) for e in eps]
                strong_real = [netDS(f) for f in fake_concat]
                errs1 = [
                    GAN_loss(r, f) for r, f in zip(strong_output, fake_label)
                ]
                errs2 = [
                    GAN_loss(r, f) for r, f in zip(strong_real, real_label)
                ]
                for f, s in zip(fake_label, strong_output):
                    metricStrong.update([
                        f,
                    ], [
                        s,
                    ])
                for f, s in zip(real_label, strong_real):
                    metricStrong.update([
                        f,
                    ], [
                        s,
                    ])
                for e1, e2 in zip(errs1, errs2):
                    strongerr = 0.5 * (e1 + e2)
                    strongerr.backward()
            trainerSD.step(batch.data[0].shape[0])
            nd.waitall()
            ############################
            # (2) Update G network: maximize log(D(x, G(x, z))) - lambda1 * L1(y, G(x, z))
            ###########################
            with autograd.record():
                sh = out_l_shape
                #eps2 = nd.random_normal(loc=0, scale=1, shape=noiseshape, ctx=ctx) #
                #eps = nd.random.uniform( low=-1, high=1, shape=noiseshape, ctx=ctx)
                #if epoch>100:
                #        eps2 = nd.multiply(eps2,sigma)+mu
                #        eps2 = nd.tanh(eps2)
                #else:
                #eps = nd.random.uniform( low=-1, high=1, shape=noiseshape, ctx=ctx)
                #eps2 = nd.concat(eps,eps2,dim=0)
                rec_output = [netD(netDe(e)) for e in eps2]
                fake_latent = [(netEn(r)) for r in real_in]
                output2 = [netD2(f) for f in fake_latent]
                fake_out = [netDe(f) for f in fake_latent]
                fake_concat = [nd.concat(r, f, dim=1)
                               for r, f in zip(real_in, fake_out)] if append else fake_out
                output = [netD(f) for f in fake_concat]
                real_label = gluon.utils.split_and_load(
                    nd.ones(out_i_shape), ctx)
                real_latent_label = gluon.utils.split_and_load(
                    nd.ones(out_l_shape), ctx)
                errG2 = [
                    GAN_loss(r, f) for r, f in zip(rec_output, real_label)
                ]
                errR = [
                    L1_loss(r, f) * lambda1
                    for r, f in zip(real_out, fake_out)
                ]
                errG = [
                    10 * GAN_loss(r, f)
                    for r, f in zip(output2, real_latent_label)
                ]  # +errG2+errR
                for e1, e2, e3 in zip(errG, errG2, errR):
                    e = e1 + e2 + e3
                    e.backward()
            trainerDe.step(batch.data[0].shape[0])
            trainerEn.step(batch.data[0].shape[0])
            nd.waitall()
            errD = (errD_real[0] + errD_fake[0]) * 0.5
            errD2 = (errD2_real[0] + errD2_fake[0]) * 0.5
            loss_rec_G2.append(nd.mean(errG2[0]).asscalar())
            loss_rec_G.append(
                nd.mean(nd.mean(errG[0])).asscalar() -
                nd.mean(errG2[0]).asscalar() - nd.mean(errR[0]).asscalar())
            loss_rec_D.append(nd.mean(errD[0]).asscalar())
            loss_rec_R.append(nd.mean(errR[0]).asscalar())
            loss_rec_D2.append(nd.mean(errD2[0]).asscalar())
            _, acc2 = metric2.get()
            name, acc = metric.get()
            acc_rec.append(acc)
            acc2_rec.append(acc2)

            # Print log information every ten batches
            if iter % 10 == 0:
                _, acc2 = metric2.get()
                name, acc = metric.get()
                _, accStrong = metricStrong.get()
                logging.info('speed: {} samples/s'.format(
                    batch_size / (time.time() - btic)))
                #print(errD)
                #logging.info('discriminator loss = %f, D2 loss = %f, generator loss = %f, G2 loss = %f, SD loss = %f,  D acc = %f , D2 acc = %f, DS acc = %f, reconstruction error= %f  at iter %d epoch %d'
                #   	% (nd.mean(errD[0]).asscalar(),nd.mean(errD2[0]).asscalar(),
                #     	nd.mean(errG[0]-errG2[0]-errR[0]).asscalar(),nd.mean(errG2[0]).asscalar(),nd.mean(strongerr[0]).asscalar() ,acc,acc2,accStrong[0],nd.mean(errR[0]).asscalar() ,iter, epoch))
            iter = iter + 1
        btic = time.time()
        name, acc = metric.get()
        _, acc2 = metric2.get()
        #tp_file = open(expname + "_trainloss.txt", "a")
        #tp_file.write(str(nd.mean(errG2).asscalar()) + " " + str(
        #    nd.mean(nd.mean(errG)).asscalar() - nd.mean(errG2).asscalar() - nd.mean(errR).asscalar()) + " " + str(
        #    nd.mean(errD).asscalar()) + " " + str(nd.mean(errD2).asscalar()) + " " + str(nd.mean(errR).asscalar()) +" "+str(acc) + " " + str(acc2)+"\n")
        #tp_file.close()
        metric.reset()
        metric2.reset()
        train_data.reset()
        metricStrong.reset()

        logging.info('\nbinary training acc at epoch %d: %s=%f' %
                     (epoch, name, acc))
        logging.info('time: %f' % (time.time() - tic))
        if epoch % 2 == 0:  # and epoch>0:
            text_file = open(expname + "_validtest.txt", "a")
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_D.params"
            netD.save_parameters(filename)
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_D2.params"
            netD2.save_parameters(filename)
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_En.params"
            netEn.save_parameters(filename)
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_De.params"
            netDe.save_parameters(filename)
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_SD.params"
            netDS.save_parameters(filename)
            fake_img1 = nd.concat(real_in[0], real_out[0], fake_out[0], dim=1)
            fake_img2 = nd.concat(real_in[1], real_out[1], fake_out[1], dim=1)
            fake_img3 = nd.concat(real_in[2], real_out[2], fake_out[2], dim=1)
            fake_img4 = nd.concat(real_in[3], real_out[3], fake_out[3], dim=1)
            val_data.reset()
            text_file = open(expname + "_validtest.txt", "a")
            for vbatch in val_data:

                real_in = vbatch.data[0]
                real_out = vbatch.data[1]
                real_in = gluon.utils.split_and_load(real_in, ctx)
                real_out = gluon.utils.split_and_load(real_out, ctx)

                fake_latent = [netEn(r) for r in real_in]
                fake_out = [netDe(f) for f in fake_latent]
                for f, r in zip(fake_out, real_out):
                    metricMSE.update([
                        f,
                    ], [
                        r,
                    ])
            _, acc2 = metricMSE.get()
            toterrR = 0
            for e in errR:
                toterrR += nd.mean(e).asscalar()
            text_file.write("%s %s %s\n" % (str(epoch), toterrR, str(acc2)))
            metricMSE.reset()
    return ([
        loss_rec_D, loss_rec_G, loss_rec_R, acc_rec, loss_rec_D2, loss_rec_G2,
        acc2_rec
    ])
Example #13
    num_dice = int(input("How many dice would you like to roll?\n"))

    output = roll(sides, num_dice)

    # Check for a critical.
    if (sides == 20 and num_dice == 1):
        if (output == 20):
            print("Critical Success!")
        elif (output == 1):
            print("Critical Failure!")

        print(output)

    print(output)

    show_die = input("Would you like to see your individual rolls? (y / n)\n")

    # Exit condition.
    if (show_die != "y"):
        break

    print(show_rolls(num_dice))

    # Visualize the rolls.
    vis_rolls = input("Would you like to visualize your rolls? (y / n)\n")
    if (vis_rolls == 'y'):
        visualize(num_dice, sides, output, rolls)

    # Empties the rolls list to avoid answers accumulating in the list.
    rolls.clear()
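
Example #13 is the body of an input loop and leans on a few helpers defined elsewhere. A hedged sketch of the kind of scaffolding it assumes (roll, show_rolls, and the module-level rolls list are hypothetical stand-ins, not the original code):

import random

rolls = []  # hypothetical module-level list the snippet clears at the end

def roll(sides, num_dice):
    # Hypothetical stand-in: roll num_dice dice, record each die, return the total.
    rolls.extend(random.randint(1, sides) for _ in range(num_dice))
    return sum(rolls[-num_dice:])

def show_rolls(num_dice):
    return rolls[-num_dice:]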
Example #14
def main(opt):
    ctx = mx.gpu() if opt.use_gpu else mx.cpu()
    testclasspaths = []
    testclasslabels = []
    print('loading test files')
    if opt.istest:
        filename = '_testlist.txt'
    elif opt.isvalidation:
        filename = '_trainlist.txt'
    else:
        filename = '_validationlist.txt'
    with open(opt.dataset + "_" + opt.expname + filename, 'r') as f:
        for line in f:
            testclasspaths.append(line.split(' ')[0])
            if int(line.split(' ')[1]) == -1:
                testclasslabels.append(0)
            else:
                testclasslabels.append(1)
    neworder = list(range(len(testclasslabels)))
    neworder = shuffle(neworder)

    c = list(zip(testclasslabels, testclasspaths))
    print('shuffling')
    random.shuffle(c)

    testclasslabels, testclasspaths = zip(*c)
    testclasslabels = testclasslabels[1:5000]
    testclasspaths = testclasspaths[1:5000]

    print('loading pictures')
    test_data = load_image.load_test_images(testclasspaths, testclasslabels,
                                            opt.batch_size, opt.img_wd,
                                            opt.img_ht, ctx, opt.noisevar)
    print('picture loading done')
    netG, netD, trainerG, trainerD = set_network(opt.depth, ctx, 0, 0, opt.ndf,
                                                 opt.ngf, opt.append)
    netG.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                     '_G.params',
                     ctx=ctx)
    netD.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                     '_D.params',
                     ctx=ctx)
    print('Model loading done')
    lbllist = []
    scorelist1 = []
    scorelist2 = []
    scorelist3 = []
    scorelist4 = []
    test_data.reset()
    count = 0
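    # Four scores are collected per image: netD's response to the denoised
    # input (scorelist1), netD on the clean reference (scorelist2), the mean
    # residual between denoised output and reference (scorelist3), and netD
    # on a reconstruction of the clean image (scorelist4).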
    for batch in (test_data):
        count += 1
        print(count)
        real_in = batch.data[0].as_in_context(ctx)
        real_out = batch.data[1].as_in_context(ctx)
        lbls = batch.label[0].as_in_context(ctx)
        outnn = (netG(real_out))
        out_concat = nd.concat(real_out, outnn, dim=1) if opt.append else outnn
        output4 = nd.mean((netD(out_concat)), (1, 3, 2)).asnumpy()
        out = (netG(real_in))
        out_concat = nd.concat(real_in, out, dim=1) if opt.append else out
        output = netD(out_concat)  #Denoised image
        output3 = nd.mean(out - real_out, (1, 3, 2)).asnumpy()  #denoised-real
        output = nd.mean(output, (1, 3, 2)).asnumpy()
        out_concat = nd.concat(real_out, real_out,
                               dim=1) if opt.append else real_out
        output2 = netD(out_concat)  #Image with no noise
        output2 = nd.mean(output2, (1, 3, 2)).asnumpy()
        lbllist = lbllist + list(lbls.asnumpy())
        scorelist1 = scorelist1 + list(output)
        scorelist2 = scorelist2 + list(output2)
        scorelist3 = scorelist3 + list(output3)
        scorelist4 = scorelist4 + list(output4)

        fake_img1 = nd.concat(real_in[0], real_out[0], out[0], outnn[0], dim=1)
        fake_img2 = nd.concat(real_in[1], real_out[1], out[1], outnn[1], dim=1)
        fake_img3 = nd.concat(real_in[2], real_out[2], out[2], outnn[2], dim=1)
        fake_img4 = nd.concat(real_in[3], real_out[3], out[3], outnn[3], dim=1)
        fake_img = nd.concat(fake_img1, fake_img2, fake_img3, fake_img4, dim=2)
        #print(np.shape(fake_img))
        visual.visualize(fake_img)
        plt.savefig('outputs/T_' + opt.expname + '_' + str(count) + '.png')

    if not opt.isvalidation:

        fpr, tpr, _ = roc_curve(lbllist, scorelist1, pos_label=1)
        roc_auc1 = auc(fpr, tpr)
        fpr, tpr, _ = roc_curve(lbllist, scorelist2, pos_label=1)
        roc_auc2 = auc(fpr, tpr)
        fpr, tpr, _ = roc_curve(lbllist, scorelist3, pos_label=1)
        roc_auc3 = auc(fpr, tpr)
        fpr, tpr, _ = roc_curve(lbllist, scorelist4, pos_label=1)
        roc_auc4 = auc(fpr, tpr)
        return ([roc_auc1, roc_auc2, roc_auc3, roc_auc4])
    else:
        return ([0, 0, 0, 0])
Example #15
0
def trainAE(opt, train_data, val_data, ctx, networks):

    netEn = networks[0]
    netDe = networks[1]
    trainerEn = networks[5]
    trainerDe = networks[6]
    epochs = opt.epochs
    batch_size = opt.batch_size
    expname = opt.expname
    text_file = open(expname + "_trainloss.txt", "w")
    text_file.close()
    text_file = open(expname + "_validtest.txt", "w")
    text_file.close()
    L1_loss = gluon.loss.L2Loss()
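    # Note: despite the variable name, gluon.loss.L2Loss is a squared-error
    # (L2) reconstruction loss.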
    metric2 = mx.metric.MSE()
    loss_rec_G = []
    loss_rec_D = []
    loss_rec_R = []
    acc_rec = []
    loss_rec_D2 = []
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    logging.basicConfig(level=logging.DEBUG)
    for epoch in range(epochs):
        tic = time.time()
        btic = time.time()
        train_data.reset()
        iter = 0
        for batch in train_data:
            real_in = batch.data[0].as_in_context(ctx)
            real_out = batch.data[1].as_in_context(ctx)
            with autograd.record():
                fake_out = netDe(netEn(real_in))
                errR = L1_loss(real_out, fake_out)
                errR.backward()
            trainerDe.step(batch.data[0].shape[0])
            trainerEn.step(batch.data[0].shape[0])
            # Print log information every ten batches
            if iter % 10 == 0:
                logging.info('speed: {} samples/s'.format(batch_size /
                                                          (time.time() - btic)))
                logging.info('reconstruction error = %f at iter %d epoch %d' %
                             (nd.mean(errR).asscalar(), iter, epoch))
            iter = iter + 1
            btic = time.time()
        loss_rec_R.append(nd.mean(errR).asscalar())
        text_tl = open(expname + "_trainloss.txt", "a")
        text_tl.write('%f %f %f %f %f %f %f ' %
                      (0, 0, 0, 0, 0, nd.mean(errR).asscalar(), epoch))
        text_tl.close()
        train_data.reset()
        if epoch % 10 == 0:
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_En.params"
            netEn.save_params(filename)
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_De.params"
            netDe.save_params(filename)
            fake_img1 = nd.concat(real_in[0], real_out[0], fake_out[0], dim=1)
            fake_img2 = nd.concat(real_in[1], real_out[1], fake_out[1], dim=1)
            fake_img3 = nd.concat(real_in[2], real_out[2], fake_out[2], dim=1)
            val_data.reset()
            text_file = open(expname + "_validtest.txt", "a")
            for vbatch in val_data:
                real_in = vbatch.data[0].as_in_context(ctx)
                real_out = vbatch.data[1].as_in_context(ctx)
                fake_out = netDe(netEn(real_in))
                metric2.update([
                    fake_out,
                ], [
                    real_out,
                ])
                _, acc2 = metric2.get()
            text_file.write("%s %s %s\n" %
                            (str(epoch), nd.mean(errR).asscalar(), str(acc2)))
            metric2.reset()
            fake_img1T = nd.concat(real_in[0], real_out[0], fake_out[0], dim=1)
            fake_img2T = nd.concat(real_in[1], real_out[1], fake_out[1], dim=1)
            fake_img3T = nd.concat(real_in[2], real_out[2], fake_out[2], dim=1)
            fake_img = nd.concat(fake_img1,
                                 fake_img2,
                                 fake_img3,
                                 fake_img1T,
                                 fake_img2T,
                                 fake_img3T,
                                 dim=2)
            visual.visualize(fake_img)
            plt.savefig('outputs/' + expname + '_' + str(epoch) + '.png')
            text_file.close()
    return ([loss_rec_D, loss_rec_G, loss_rec_R, acc_rec, loss_rec_D2])
Example #16
0
def traincvpr18(opt, train_data, val_data, ctx, networks):

    netEn = networks[0]
    netDe = networks[1]
    netD = networks[2]
    trainerEn = networks[5]
    trainerDe = networks[6]
    trainerD = networks[7]
    epochs = opt.epochs
    lambda1 = opt.lambda1
    batch_size = opt.batch_size
    expname = opt.expname
    append = opt.append
    text_file = open(expname + "_trainloss.txt", "w")
    text_file.close()
    text_file = open(expname + "_validtest.txt", "w")
    text_file.close()
    GAN_loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    L1_loss = gluon.loss.L2Loss()
    metric = mx.metric.CustomMetric(facc)
    metricl = mx.metric.CustomMetric(facc)
    metric2 = mx.metric.MSE()
    loss_rec_G2 = []
    loss_rec_G = []
    loss_rec_D = []
    loss_rec_R = []
    acc_rec = []
    loss_rec_D2 = []
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    logging.basicConfig(level=logging.DEBUG)
    for epoch in range(epochs):
        tic = time.time()
        btic = time.time()
        train_data.reset()
        iter = 0
        for batch in train_data:
            ############################
            # (1) Update D network: maximize log(D(x, y)) + log(1 - D(x, G(x, z)))
            ###########################
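            # SigmoidBinaryCrossEntropyLoss applies a sigmoid to the raw
            # discriminator output o and computes
            # -[y*log(sigmoid(o)) + (1-y)*log(1-sigmoid(o))], so labelling
            # fakes 0 and reals 1 realises the objective stated above.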
            real_in = batch.data[0].as_in_context(ctx)
            real_out = batch.data[1].as_in_context(ctx)
            fake_latent = netEn(real_in)
            fake_out = netDe(fake_latent)
            fake_concat = nd.concat(real_in, fake_out,
                                    dim=1) if append else fake_out
            with autograd.record():
                # Train with fake image
                # Use image pooling to utilize history imagesi
                output = netD(fake_concat)
                fake_label = nd.zeros(output.shape, ctx=ctx)
                errD_fake = GAN_loss(output, fake_label)
                metric.update([
                    fake_label,
                ], [
                    output,
                ])
                real_concat = nd.concat(real_in, real_out,
                                        dim=1) if append else real_out
                output = netD(real_concat)
                real_label = nd.ones(output.shape, ctx=ctx)
                errD_real = GAN_loss(output, real_label)
                errD = (errD_real + errD_fake) * 0.5
                errD.backward()
                metric.update([
                    real_label,
                ], [
                    output,
                ])
            trainerD.step(batch.data[0].shape[0])
            ############################
            # (2) Update G network: maximize log(D(x, G(x, z))) - lambda1 * L1(y, G(x, z))
            ###########################
            with autograd.record():
                fake_latent = (netEn(real_in))
                fake_out = netDe(fake_latent)
                fake_concat = nd.concat(real_in, fake_out,
                                        dim=1) if append else fake_out
                output = netD(fake_concat)
                real_label = nd.ones(output.shape, ctx=ctx)
                errG = GAN_loss(
                    output, real_label) + L1_loss(real_out, fake_out) * lambda1
                errR = L1_loss(real_out, fake_out)
                errG.backward()
            trainerDe.step(batch.data[0].shape[0])
            trainerEn.step(batch.data[0].shape[0])
            # Print log information every ten batches
            if iter % 10 == 0:
                name, acc = metric.get()
                logging.info('speed: {} samples/s'.format(batch_size /
                                                          (time.time() - btic)))
                logging.info(
                    'discriminator loss = %f, generator loss = %f, binary training acc = %f, reconstruction error = %f at iter %d epoch %d'
                    % (nd.mean(errD).asscalar(), nd.mean(errG).asscalar(), acc,
                       nd.mean(errR).asscalar(), iter, epoch))
            iter = iter + 1
            btic = time.time()
        loss_rec_G.append(
            nd.mean(errG).asscalar() - nd.mean(errR).asscalar() * lambda1)
        loss_rec_D.append(nd.mean(errD).asscalar())
        loss_rec_R.append(nd.mean(errR).asscalar())
        name, acc = metric.get()
        acc_rec.append(acc)
        _, acc2 = metricl.get()
        text_tl = open(expname + "_trainloss.txt", "a")
        text_tl.write('%f %f %f %f %f %f %f ' %
                      (nd.mean(errD).asscalar(), 0, nd.mean(errG).asscalar(),
                       acc, 0, nd.mean(errR).asscalar(), epoch))
        text_tl.close()
        metricl.reset()
        metric.reset()
        train_data.reset()

        logging.info('\nbinary training acc at epoch %d: %s=%f' %
                     (epoch, name, acc))
        logging.info('time: %f' % (time.time() - tic))
        if epoch % 10 == 0:
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_D.params"
            netD.save_params(filename)
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_En.params"
            netEn.save_params(filename)
            filename = "checkpoints/" + expname + "_" + str(
                epoch) + "_De.params"
            netDe.save_params(filename)
            fake_img1 = nd.concat(real_in[0], real_out[0], fake_out[0], dim=1)
            fake_img2 = nd.concat(real_in[1], real_out[1], fake_out[1], dim=1)
            fake_img3 = nd.concat(real_in[2], real_out[2], fake_out[2], dim=1)
            val_data.reset()
            text_file = open(expname + "_validtest.txt", "a")
            for vbatch in val_data:
                real_in = vbatch.data[0].as_in_context(ctx)
                real_out = vbatch.data[1].as_in_context(ctx)
                fake_out = netDe(netEn(real_in))
                metric2.update([
                    fake_out,
                ], [
                    real_out,
                ])
                _, acc2 = metric2.get()
            text_file.write("%s %s %s\n" %
                            (str(epoch), nd.mean(errR).asscalar(), str(acc2)))
            metric2.reset()
            fake_img1T = nd.concat(real_in[0], real_out[0], fake_out[0], dim=1)
            fake_img2T = nd.concat(real_in[1], real_out[1], fake_out[1], dim=1)
            fake_img3T = nd.concat(real_in[2], real_out[2], fake_out[2], dim=1)
            fake_img = nd.concat(fake_img1,
                                 fake_img2,
                                 fake_img3,
                                 fake_img1T,
                                 fake_img2T,
                                 fake_img3T,
                                 dim=2)
            visual.visualize(fake_img)
            plt.savefig('outputs/' + expname + '_' + str(epoch) + '.png')
            text_file.close()
    return [loss_rec_D, loss_rec_G, loss_rec_R, acc_rec, loss_rec_D2]
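# Several of the training functions above pass an `facc` helper into
# mx.metric.CustomMetric without defining it here. A minimal compatible
# sketch (an assumption, not code from the original source): threshold the
# discriminator outputs at 0.5 and measure binary accuracy.
def facc(label, pred):
    pred = pred.ravel()
    label = label.ravel()
    return ((pred > 0.5) == label).mean()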
Example #17
0
    #fake_img2 = nd.concat(out[7],out[6], out[5], out[4],dim=1)
    #fake_img3 = nd.concat(out[8],out[9], out[10], out[11],dim=1)
    #fake_img4 = nd.concat(out[15],out[14], out[13], out[12],dim=1)
    #fake_img = nd.concat(fake_img1,fake_img2, fake_img3,fake_img4, dim=2)
    #print(np.shape(fake_img))
    #fakecode = nd.random.uniform(low = -1, high = 1, shape=(16, 128,1,1), ctx=ctx)
    #aakecode = nd.random.uniform(low = -1, high = 1, shape=(16, 128,1,1), ctx=ctx)
    #visual.visualize(fake_img)
    # Assemble a 30x30 grid of generated samples: build each strip by
    # concatenating 30 images along dim=1, then join the strips along dim=2.
    plt.figure(figsize=(50, 50))
    cnt = 0
    clm = None
    for i in range(30):
        rw = None
        for j in range(30):
            rw = out[cnt].copy() if rw is None else nd.concat(rw, out[cnt], dim=1)
            cnt += 1
        clm = rw.copy() if clm is None else nd.concat(clm, rw, dim=2)
    visual.visualize(clm)
    #plt.rcParams["figure.figsize"] = [50,50]
    #plt.figure(figsize=(50,50))
    plt.savefig('outputs/fakes_' + opt.expname + '_.png')
Example #18
0
def train(pool_size,
          epochs,
          train_data,
          val_data,
          ctx,
          netG,
          netD,
          trainerG,
          trainerD,
          lambda1,
          batch_size,
          expname,
          append=True,
          useAE=False):

    text_file = open(expname + "_validtest.txt", "w")
    text_file.close()
    #netGT, netDT, _, _ = set_test_network(opt.depth, ctx, opt.lr, opt.beta1,opt.ndf,  opt.ngf, opt.append)
    dlr = trainerD.learning_rate
    glr = trainerG.learning_rate
    GAN_loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    L1_loss = gluon.loss.L2Loss()
    image_pool = imagePool.ImagePool(pool_size)
    metric = mx.metric.CustomMetric(facc)
    metric2 = mx.metric.MSE()
    loss_rec_G = []
    loss_rec_D = []
    loss_rec_R = []
    acc_rec = []
    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    logging.basicConfig(level=logging.DEBUG)
    for epoch in range(epochs):

        if useAE:
            train_data.reset()
            for batch in train_data:
                real_in = batch.data[0].as_in_context(ctx)
                real_out = batch.data[1].as_in_context(ctx)
                # The forward pass must run under autograd.record() for
                # backward() to have gradients to propagate.
                with autograd.record():
                    fake_out = netG(real_in)
                    loss = L1_loss(real_out, fake_out)
                loss.backward()
                trainerG.step(batch.data[0].shape[0])
                metric2.update([
                    real_out,
                ], [
                    fake_out,
                ])
            # Checkpoint and visualize once per tenth epoch, not per batch.
            if epoch % 10 == 0:
                filename = "checkpoints/" + expname + "_" + str(
                    epoch) + "_G.params"
                netG.save_params(filename)
                fake_img1 = nd.concat(real_in[0], real_out[0], fake_out[0],
                                      dim=1)
                fake_img2 = nd.concat(real_in[1], real_out[1], fake_out[1],
                                      dim=1)
                fake_img3 = nd.concat(real_in[2], real_out[2], fake_out[2],
                                      dim=1)
                fake_img = nd.concat(fake_img1, fake_img2, fake_img3, dim=2)
                visual.visualize(fake_img)
                plt.savefig('outputs/' + expname + '_' + str(epoch) + '.png')
            train_data.reset()
            _, acc2 = metric2.get()
            metric2.reset()
            print("training MSE: " + str(acc2))

        else:
            tic = time.time()
            btic = time.time()
            train_data.reset()
            iter = 0
            if epoch > 250:
                trainerD.set_learning_rate(dlr * (1 - int(epoch - 250) / 1000))
                trainerG.set_learning_rate(glr * (1 - int(epoch - 250) / 1000))
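            # Linear decay schedule: past epoch 250 both learning rates fall
            # linearly, reaching zero at epoch 1250.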
            #print('learning rate : '+str(trainerD.learning_rate ))
            for batch in train_data:
                ############################
                # (1) Update D network: maximize log(D(x, y)) + log(1 - D(x, G(x, z)))
                ###########################
                real_in = batch.data[0].as_in_context(ctx)
                real_out = batch.data[1].as_in_context(ctx)

                fake_out = netG(real_in)
                fake_concat = nd.concat(real_in, fake_out,
                                        dim=1) if append else fake_out
                with autograd.record():
                    # Train with fake image
                    # Use image pooling to utilize history images
                    output = netD(fake_concat)
                    fake_label = nd.zeros(output.shape, ctx=ctx)
                    errD_fake = GAN_loss(output, fake_label)
                    metric.update([
                        fake_label,
                    ], [
                        output,
                    ])
                    real_concat = nd.concat(real_in, real_out,
                                            dim=1) if append else real_out
                    output = netD(real_concat)
                    real_label = nd.ones(output.shape, ctx=ctx)
                    errD_real = GAN_loss(output, real_label)
                    errD = (errD_real + errD_fake) * 0.5
                    errD.backward()
                    metric.update([
                        real_label,
                    ], [
                        output,
                    ])

                trainerD.step(batch.data[0].shape[0])

                ############################
                # (2) Update G network: maximize log(D(x, G(x, z))) - lambda1 * L1(y, G(x, z))
                ###########################
                with autograd.record():
                    fake_out = netG(real_in)
                    fake_concat = nd.concat(real_in, fake_out,
                                            dim=1) if append else fake_out
                    output = netD(fake_concat)
                    real_label = nd.ones(output.shape, ctx=ctx)
                    errG = GAN_loss(output, real_label) + L1_loss(
                        real_out, fake_out) * lambda1
                    errR = L1_loss(real_out, fake_out)
                    errG.backward()

                trainerG.step(batch.data[0].shape[0])
                loss_rec_G.append(
                    nd.mean(errG).asscalar() -
                    nd.mean(errR).asscalar() * lambda1)
                loss_rec_D.append(nd.mean(errD).asscalar())
                loss_rec_R.append(nd.mean(errR).asscalar())
                name, acc = metric.get()
                acc_rec.append(acc)
                # Print log information every five batches
                if iter % 5 == 0:
                    name, acc = metric.get()
                    logging.info('speed: {} samples/s'.format(
                        batch_size / (time.time() - btic)))
                    #print(errD)
                    logging.info(
                        'discriminator loss = %f, generator loss = %f, binary training acc = %f, reconstruction error = %f at iter %d epoch %d'
                        % (nd.mean(errD).asscalar(), nd.mean(errG).asscalar(),
                           acc, nd.mean(errR).asscalar(), iter, epoch))
                iter = iter + 1
                btic = time.time()

            name, acc = metric.get()
            metric.reset()
            train_data.reset()

            logging.info('\nbinary training acc at epoch %d: %s=%f' %
                         (epoch, name, acc))
            logging.info('time: %f' % (time.time() - tic))
            if epoch % 5 == 0:
                text_file = open(expname + "_validtest.txt", "a")
                filename = "checkpoints/" + expname + "_" + str(
                    epoch) + "_D.params"
                netD.save_params(filename)
                filename = "checkpoints/" + expname + "_" + str(
                    epoch) + "_G.params"
                netG.save_params(filename)
                fake_img1 = nd.concat(real_in[0],
                                      real_out[0],
                                      fake_out[0],
                                      dim=1)
                fake_img2 = nd.concat(real_in[1],
                                      real_out[1],
                                      fake_out[1],
                                      dim=1)
                fake_img3 = nd.concat(real_in[2],
                                      real_out[2],
                                      fake_out[2],
                                      dim=1)
                fake_img4 = nd.concat(real_in[3],
                                      real_out[3],
                                      fake_out[3],
                                      dim=1)
                val_data.reset()

                for vbatch in val_data:

                    real_in = vbatch.data[0].as_in_context(ctx)
                    real_out = vbatch.data[1].as_in_context(ctx)
                    fake_out = netG(real_in)
                    metric2.update([
                        fake_out,
                    ], [
                        real_out,
                    ])
                _, acc2 = metric2.get()
                text_file.write(
                    "%s %s %s\n" %
                    (str(epoch), nd.mean(errR).asscalar(), str(acc2)))
                metric2.reset()

                fake_img1T = nd.concat(real_in[0],
                                       real_out[0],
                                       fake_out[0],
                                       dim=1)
                fake_img2T = nd.concat(real_in[1],
                                       real_out[1],
                                       fake_out[1],
                                       dim=1)
                fake_img3T = nd.concat(real_in[2],
                                       real_out[2],
                                       fake_out[2],
                                       dim=1)
                #fake_img4T = nd.concat(real_in[3],real_out[3], fake_out[3], dim=1)
                fake_img = nd.concat(fake_img1,
                                     fake_img2,
                                     fake_img3,
                                     fake_img1T,
                                     fake_img2T,
                                     fake_img3T,
                                     dim=2)
                visual.visualize(fake_img)
                plt.savefig('outputs/' + expname + '_' + str(epoch) + '.png')
                text_file.close()

    return ([loss_rec_D, loss_rec_G, loss_rec_R, acc_rec])
Example #19
0
def train():
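    # Note: this variant reads its collaborators (netG, netD, trainerG,
    # trainerD, train_data, GAN_loss, L1_loss, lambda1, batch_size, epochs,
    # ctx, pool_size) from module-level globals rather than taking arguments.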
    image_pool = imagePool.ImagePool(pool_size)
    metric = mx.metric.CustomMetric(facc)

    stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
    logging.basicConfig(level=logging.DEBUG)

    for epoch in range(epochs):
        tic = time.time()
        btic = time.time()
        train_data.reset()
        iter = 0
        for batch in train_data:
            ############################
            # (1) Update D network: maximize log(D(x, y)) + log(1 - D(x, G(x, z)))
            ###########################
            real_in = batch.data[0].as_in_context(ctx)
            real_out = batch.data[1].as_in_context(ctx)

            fake_out = netG(real_in)
            fake_concat = image_pool.query(nd.concat(real_in, fake_out, dim=1))
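            # The image pool keeps a buffer of previously generated fakes and
            # randomly swaps some into the current batch, so the discriminator
            # sees a history of generator outputs rather than only the latest
            # ones (the standard pix2pix/CycleGAN trick, assuming the local
            # imagePool module follows it).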
            with autograd.record():
                # Train with fake image
                # Use image pooling to utilize history images
                output = netD(fake_concat)
                fake_label = nd.zeros(output.shape, ctx=ctx)
                errD_fake = GAN_loss(output, fake_label)
                metric.update([
                    fake_label,
                ], [
                    output,
                ])

                # Train with real image
                real_concat = nd.concat(real_in, real_out, dim=1)
                output = netD(real_concat)
                real_label = nd.ones(output.shape, ctx=ctx)
                errD_real = GAN_loss(output, real_label)
                errD = (errD_real + errD_fake) * 0.5
                errD.backward()
                metric.update([
                    real_label,
                ], [
                    output,
                ])

            trainerD.step(batch.data[0].shape[0])

            ############################
            # (2) Update G network: maximize log(D(x, G(x, z))) - lambda1 * L1(y, G(x, z))
            ###########################
            with autograd.record():
                fake_out = netG(real_in)
                fake_concat = nd.concat(real_in, fake_out, dim=1)
                output = netD(fake_concat)
                real_label = nd.ones(output.shape, ctx=ctx)
                errG = GAN_loss(
                    output, real_label) + L1_loss(real_out, fake_out) * lambda1
                errG.backward()

            trainerG.step(batch.data[0].shape[0])

            # Print log information every ten batches
            if iter % 10 == 0:
                name, acc = metric.get()
                logging.info('speed: {} samples/s'.format(
                    batch_size / (time.time() - btic)))
                logging.info(
                    'discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d'
                    % (nd.mean(errD).asscalar(), nd.mean(errG).asscalar(), acc,
                       iter, epoch))
            iter = iter + 1
            btic = time.time()

        name, acc = metric.get()
        metric.reset()
        logging.info('\nbinary training acc at epoch %d: %s=%f' %
                     (epoch, name, acc))
        logging.info('time: %f' % (time.time() - tic))
        if epoch % 10 == 0:
            filename = "checkpoints/testnet_" + str(epoch) + "_D.params"
            netD.save_params(filename)
            filename = "checkpoints/testnet_" + str(epoch) + "_G.params"
            netG.save_params(filename)
            # Visualize one generated image for each epoch
            fake_img = fake_out[0]
            visual.visualize(fake_img)
            plt.savefig('outputs/testnet_' + str(epoch) + '.png')
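# `visual.visualize` is imported from a project-local module throughout these
# examples. A minimal sketch of a compatible implementation (assumed, not
# from the original; presumes the snippets' `plt`/`np` imports): map a
# (C, H, W) ndarray in [-1, 1] onto an 8-bit image and show it.
def visualize(img_arr):
    plt.imshow(((img_arr.asnumpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8))
    plt.axis('off')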
Example #20
0
def main(opt):
    if opt.useAE == 1:
        useAE = True
    else:
        useAE = False
    if opt.seed != -1:
        random.seed(opt.seed)
    ctx = mx.gpu() if opt.use_gpu else mx.cpu()
    inclasspaths, inclasses = dload.loadPaths(opt.dataset, opt.datapath,
                                              opt.expname, opt.batch_size + 1,
                                              opt.classes)
    train_data, val_data = load_image.load_image(inclasspaths, opt.batch_size,
                                                 opt.img_wd, opt.img_ht,
                                                 opt.noisevar)
    print('Data loading done.')

    if opt.istest:
        testclasspaths = []
        testclasslabels = []
        # opt.istest is already true on this branch, so the test list is used.
        filename = '_testlist.txt'
        with open(opt.dataset + "_" + opt.expname + filename, 'r') as f:
            for line in f:
                testclasspaths.append(line.split(' ')[0])
                if int(line.split(' ')[1]) == -1:
                    testclasslabels.append(0)
                else:
                    testclasslabels.append(1)

        test_data = load_image.load_test_images(testclasspaths,
                                                testclasslabels,
                                                opt.batch_size, opt.img_wd,
                                                opt.img_ht, ctx, opt.noisevar)
        netG, netD, trainerG, trainerD = set_network(opt.depth, ctx, 0, 0,
                                                     opt.ndf, opt.ngf,
                                                     opt.append)
        netG.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                         '_G.params',
                         ctx=ctx)
        netD.load_params('checkpoints/' + opt.expname + '_' + str(opt.epochs) +
                         '_D.params',
                         ctx=ctx)
        lbllist = []
        scorelist1 = []
        scorelist2 = []
        scorelist3 = []
        scorelist4 = []
        test_data.reset()
        count = 0

        for batch in (test_data):
            count += 1
            real_in = batch.data[0].as_in_context(ctx)
            real_out = batch.data[1].as_in_context(ctx)
            lbls = batch.label[0].as_in_context(ctx)
            outnn = (netG(real_out))
            out_concat = nd.concat(real_out, outnn,
                                   dim=1) if opt.append else outnn
            output4 = nd.mean((netD(out_concat)), (1, 3, 2)).asnumpy()
            out = (netG(real_in))
            out_concat = nd.concat(real_in, out, dim=1) if opt.append else out
            output = netD(out_concat)  #Denoised image
            output3 = nd.mean(out - real_out,
                              (1, 3, 2)).asnumpy()  #denoised-real
            output = nd.mean(output, (1, 3, 2)).asnumpy()
            out_concat = nd.concat(real_out, real_out,
                                   dim=1) if opt.append else real_out
            output2 = netD(out_concat)  #Image with no noise
            output2 = nd.mean(output2, (1, 3, 2)).asnumpy()
            lbllist = lbllist + list(lbls.asnumpy())
            scorelist1 = scorelist1 + list(output)
            scorelist2 = scorelist2 + list(output2)
            scorelist3 = scorelist3 + list(output3)
            scorelist4 = scorelist4 + list(output4)

        fake_img1 = nd.concat(real_in[0], real_out[0], out[0], outnn[0], dim=1)
        fake_img2 = nd.concat(real_in[1], real_out[1], out[1], outnn[1], dim=1)
        fake_img3 = nd.concat(real_in[2], real_out[2], out[2], outnn[2], dim=1)
        fake_img4 = nd.concat(real_in[3], real_out[3], out[3], outnn[3], dim=1)
        fake_img = nd.concat(fake_img1, fake_img2, fake_img3, fake_img4, dim=2)
        #print(np.shape(fake_img))
        visual.visualize(fake_img)
        plt.savefig('outputs/T_' + opt.expname + '_' + str(count) + '.png')
        '''
            fpr, tpr, _ = roc_curve(lbllist, scorelist1, 1)
            roc_auc1 = auc(fpr, tpr)
            fpr, tpr, _ = roc_curve(lbllist, scorelist2, 1)
            roc_auc2 = auc(fpr, tpr)
            fpr, tpr, _ = roc_curve(lbllist, scorelist3, 1)
            roc_auc3 = auc(fpr, tpr)
            fpr, tpr, _ = roc_curve(lbllist, scorelist4, 1)
            roc_auc4 = auc(fpr, tpr)
            return([roc_auc1, roc_auc2, roc_auc3, roc_auc4])
            '''

        return ([0, 0, 0, 0])

    else:
        netG, netD, trainerG, trainerD = set_network(opt.depth, ctx, opt.lr,
                                                     opt.beta1, opt.ndf,
                                                     opt.ngf, opt.append)
        if opt.graphvis:
            print(netG)
        print('training')
        print(opt.epochs)
        loss_vec = train(opt.pool_size,
                         opt.epochs,
                         train_data,
                         val_data,
                         ctx,
                         netG,
                         netD,
                         trainerG,
                         trainerD,
                         opt.lambda1,
                         opt.batch_size,
                         opt.expname,
                         opt.append,
                         useAE=useAE)
        plt.gcf().clear()
        plt.plot(loss_vec[0], label="D", alpha=0.7)
        plt.plot(loss_vec[1], label="G", alpha=0.7)
        plt.plot(loss_vec[2], label="R", alpha=0.7)
        plt.plot(loss_vec[3], label="Acc", alpha=0.7)
        plt.legend()
        plt.savefig('outputs/' + opt.expname + '_loss.png')
        return inclasses
Example #21
0
            with open(os.path.join(args.output_dir, args.vis_txt_dir),
                      args.mode) as f:
                if not args.append:
                    # Write the column header once when not appending.
                    f.write(", ".join(names) + "\n")
                    f.flush()

                for step in steps:
                    print('Running time_step = {}'.format(step))
                    lst = trainMain(mkcmd(step, args.output_folder))
                    result.append(lst)
                    f.write(
                        "{:.2}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.2f}\n"
                        .format(lst[8], lst[0], lst[1], lst[2], lst[3], lst[4],
                                lst[5], lst[6], lst[7]))
                    f.flush()


    if not os.path.exists(os.path.join(args.output_dir, args.vis_txt_dir)):
        print("Could not find {} file.".format(args.vis_txt_dir))
        print("Exiting...")
        sys.exit(1)

    if not os.path.exists(os.path.join(args.output_dir, args.figure_dir)):
        os.makedirs(os.path.join(args.output_dir, args.figure_dir))

    visualize(args.output_dir, args.figure_dir)
Example #22
0
def mainEvaluation(opt):
    ctx = mx.gpu() if opt.use_gpu else mx.cpu()
    testclasspaths = []
    testclasslabels = []
    print('loading test files')
    filename = '_testlist.txt'
    with open(opt.dataset + "_" + opt.expname + filename, 'r') as f:
        for line in f:
            testclasspaths.append(line.split(' ')[0])
            if int(line.split(' ')[1]) == -1:
                testclasslabels.append(0)
            else:
                testclasslabels.append(1)
    c = list(zip(testclasslabels, testclasspaths))
    print('shuffling')
    random.shuffle(c)
    testclasslabels, testclasspaths = zip(*c)
    print('loading pictures')
    test_data = load_image.load_test_images(testclasspaths, testclasslabels,
                                            opt.batch_size, opt.img_wd,
                                            opt.img_ht, ctx, opt.noisevar)
    print('picture loading done')
    opt.istest = True
    networks = models.set_network(opt, ctx, True)
    netEn = networks[0]
    netDe = networks[1]
    netD = networks[2]
    netD2 = networks[3]
    load_epoch = opt.epochs - 1
    netEn.load_params('checkpoints/' + opt.expname + '_' + str(load_epoch) +
                      '_En.params',
                      ctx=ctx)
    netDe.load_params('checkpoints/' + opt.expname + '_' + str(load_epoch) +
                      '_De.params',
                      ctx=ctx)
    if opt.ntype > 1:
        netD.load_params('checkpoints/' + opt.expname + '_' + str(load_epoch) +
                         '_D.params',
                         ctx=ctx)
    if opt.ntype > 2:
        netD2.load_params('checkpoints/' + opt.expname + '_' +
                          str(load_epoch) + '_D2.params',
                          ctx=ctx)
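    # opt.ntype appears to select the architecture: 1 = plain autoencoder,
    # >1 additionally uses the discriminator netD, and >2 also a second
    # discriminator netD2 (inferred from the conditional loads above).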

    print('Model loading done')
    lbllist = []
    scorelist1 = []
    scorelist2 = []
    scorelist3 = []
    scorelist4 = []
    test_data.reset()
    count = 0

    for batch in (test_data):
        count = count + 1
        output1 = np.zeros(opt.batch_size)
        output2 = np.zeros(opt.batch_size)
        output3 = np.zeros(opt.batch_size)
        output4 = np.zeros(opt.batch_size)
        real_in = batch.data[0].as_in_context(ctx)
        real_out = batch.data[1].as_in_context(ctx)
        lbls = batch.label[0].as_in_context(ctx)
        outnn = (netDe(netEn((real_in))))
        out = outnn
        output3 = -1 * nd.mean((outnn - real_out)**2, (1, 3, 2)).asnumpy()
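        # Reconstruction error is negated so that larger scores mean more
        # normal-looking inputs (assuming label 1 marks inliers), matching
        # roc_curve(..., pos_label=1) below.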
        if opt.ntype > 1:  #AE
            out_concat = nd.concat(real_in, outnn,
                                   dim=1) if opt.append else outnn
            output1 = nd.mean((netD(out_concat)), (1, 3, 2)).asnumpy()
            out_concat = nd.concat(real_in, real_in,
                                   dim=1) if opt.append else real_in
            output2 = netD((out_concat))  # Image with no noise
            output2 = nd.mean(output2, (1, 3, 2)).asnumpy()
            out = netDe(netEn(real_out))
            out_concat = nd.concat(real_in, out, dim=1) if opt.append else out
            output = netD(out_concat)  #Denoised image
            output4 = nd.mean(output, (1, 3, 2)).asnumpy()
        lbllist = lbllist + list(lbls.asnumpy())
        scorelist1 = scorelist1 + list(output1)
        scorelist2 = scorelist2 + list(output2)
        scorelist3 = scorelist3 + list(output3)
        scorelist4 = scorelist4 + list(output4)
        out = netDe(netEn(real_in))

        # Save some sample results
        fake_img1 = nd.concat(real_in[0], real_out[0], out[0], outnn[0], dim=1)
        fake_img2 = nd.concat(real_in[1], real_out[1], out[1], outnn[1], dim=1)
        fake_img3 = nd.concat(real_in[2], real_out[2], out[2], outnn[2], dim=1)
        fake_img4 = nd.concat(real_in[3], real_out[3], out[3], outnn[3], dim=1)
        fake_img = nd.concat(fake_img1, fake_img2, fake_img3, fake_img4, dim=2)
        visual.visualize(fake_img)
        plt.savefig('outputs/T_' + opt.expname + '_' + str(count) + '.png')

    print("Positives" + str(np.sum(lbllist)))
    print("Negatives" + str(np.shape(lbllist) - np.sum(lbllist)))
    fpr, tpr, _ = roc_curve(lbllist, scorelist3, pos_label=1)
    roc_auc1 = 0
    roc_auc2 = 0
    roc_auc4 = 0
    roc_auc3 = auc(fpr, tpr)
    if int(opt.ntype) > 1:  #AE
        fpr, tpr, _ = roc_curve(lbllist, scorelist1, pos_label=1)
        roc_auc1 = auc(fpr, tpr)
        fpr, tpr, _ = roc_curve(lbllist, scorelist2, pos_label=1)
        roc_auc2 = auc(fpr, tpr)
        fpr, tpr, _ = roc_curve(lbllist, scorelist4, pos_label=1)
        roc_auc4 = auc(fpr, tpr)

    return [roc_auc1, roc_auc2, roc_auc3, roc_auc4]
Example #23
0
		GWH1 = (GWH1*(n_batch-1) + gWH1)/n_batch # Accumulate average gradient of WH1
		GBH1 = (GBH1*(n_batch-1) + gBH1)/n_batch # Accumulate average gradient of BH1
		if err < Err:
			Err = err # Track minimum mini-batch error this epoch
	if verbose:
		print('			Min error in epoch %d/%d is %f' % (i, EPOCHS, Err))
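	# Classical momentum: each velocity V is decayed by zeta and pushed along
	# the negative averaged gradient, then added to the parameters below.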
	VW = VW*zeta - alpha * GW
	VB = VB*zeta - alpha * GB
	VWH1 = VWH1*zeta - alpha * GWH1
	VBH1 = VBH1*zeta - alpha * GBH1
	L.W.set_value( L.W.get_value() + VW )
	L.B.set_value( L.B.get_value() + VB )
	H.W.set_value( H.W.get_value() + VWH1 )
	H.B.set_value( H.B.get_value() + VBH1 )
		
	Q = abs( array( predict ( Ts[0] ) ) - Ts[1] )
	if verbose:
		print(m_test_sample - count_nonzero(Q))
	errAcc.append(Err)

	plot(errAcc)
	savefig('plot_'+str(EPOCHS)+'_'+str(TOTAL_BATCHES)+'_'+str(alpha)+'_'+str(zeta)+'_'+str(lam)+'best'+'.png')


	vis = visualize ( H.W.get_value().transpose()[:10,:] )
	imwrite('weights_mlp'+str(i)+'.png', vis)
# savefig('plot_'+str(EPOCHS)+'_'+str(TOTAL_BATCHES)+'_'+str(alpha)+'_'+str(zeta)+'_'+str(lam)+'best'+'.png')
# fl = open('LR_weights', 'wb')
# import pickle
# pickle.dump([L.W.get_value(), L.B.get_value()], fl)
# fl.close()