def test_step():
    print 'Testing...'
    all_z = []
    batch_size = 23
    for i in range(int(len(test_order) / batch_size)):
        input_image = get_data.get_jpg_test(test_jpg, test_order[batch_size * i:batch_size * (i + 1)]) / 128.0
        input_label = get_data.get_label(test_label, test_order[batch_size * i:batch_size * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        z_miu = sess.run(hg.output, feed_dict)
        all_z.append(z_miu)
    all_z = np.array(all_z)
    all_z = np.reshape(all_z, (-1, 17))
    np.save('all_output.npy', all_z)

def test_step():
    print 'Testing...'
    # all_ce_loss = 0
    # all_output = []
    # all_label = []
    all_feature = []
    batch_size = 23
    for i in range(int(len(train_order) / batch_size)):
        input_image = get_data.get_jpg_test(train_jpg, train_order[batch_size * i:batch_size * (i + 1)]) / 128.0
        input_label = get_data.get_label(train_label, train_order[batch_size * i:batch_size * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        feature = sess.run(hg.feature, feed_dict)
        # all_ce_loss += ce_loss
        # for i in output:
        #     all_output.append(i)
        # for i in input_label:
        #     all_label.append(i)
        all_feature.append(feature)
    # all_output = np.array(all_output)
    # all_label = np.array(all_label)
    # average_precision = average_precision_score(all_label,all_output)
    all_feature = np.array(all_feature)
    all_feature = np.reshape(all_feature, (-1, 256))
    np.save('train_feature.npy', all_feature)

def validation_step(current_step):
    print 'Validating...'
    all_ce_loss = 0
    all_l2_loss = 0
    all_total_loss = 0
    all_output = []
    all_label = []
    for i in range(int(len(validation_order) / 18)):
        input_image = get_data.get_jpg_test(jpg_list, validation_order[18 * i:18 * (i + 1)]) / 128.0
        input_label = get_data.get_label(data, validation_order[18 * i:18 * (i + 1)])
        input_nlcd = get_data.get_nlcd(data, validation_order[18 * i:18 * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        ce_loss, l2_loss, total_loss, output = sess.run(
            [hg.ce_loss, hg.l2_loss, hg.total_loss, hg.output], feed_dict)
        all_ce_loss += ce_loss
        all_l2_loss += l2_loss
        all_total_loss += total_loss
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    auc = roc_auc_score(all_label, all_output)
    ce_loss = all_ce_loss / 283.0
    l2_loss = all_l2_loss / 283.0
    total_loss = all_total_loss / 283.0
    all_output = np.reshape(all_output, (-1))
    all_label = np.reshape(all_label, (-1))
    ap = average_precision_score(all_label, all_output)
    time_str = datetime.datetime.now().isoformat()
    tempstr = "{}: auc {:g}, ap {:g}, ce_loss {:g}, l2_loss {:g}, total_loss {:g}".format(
        time_str, auc, ap, ce_loss, l2_loss, total_loss)
    print(tempstr)
    summary_writer.add_summary(MakeSummary('validation/auc', auc), current_step)
    summary_writer.add_summary(MakeSummary('validation/ap', ap), current_step)
    summary_writer.add_summary(MakeSummary('validation/ce_loss', ce_loss), current_step)
    return ce_loss

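# MakeSummary is used throughout the validation/training steps in this
# section but is not defined here. The sketch below is only a minimal
# illustration of the usual TF 1.x pattern (wrapping a scalar in a
# tf.Summary protobuf so it can be written without a merged-summary run);
# it assumes the module-level `import tensorflow as tf` used by the
# surrounding code and is not the project's confirmed implementation.
def MakeSummary(name, value):
    # build a tf.Summary proto holding a single scalar value under `name`
    summary = tf.Summary()
    summary.value.add(tag=name, simple_value=value)
    return summary
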
def test_step():
    print 'Testing...'
    all_recon_loss = 0
    all_kl_loss = 0
    all_vae_loss = 0
    all_l2_loss = 0
    all_total_loss = 0
    all_output = []
    all_label = []
    for i in range(int(len(test_order) / 18)):
        input_image = get_data.get_jpg_test(jpg_list, test_order[18 * i:18 * (i + 1)]) / 128.0
        input_nlcd = get_data.get_nlcd(data, test_order[18 * i:18 * (i + 1)])
        input_label = get_data.get_label(data, test_order[18 * i:18 * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        recon_loss, kl_loss, vae_loss, l2_loss, total_loss, output = sess.run(
            [hg.recon_loss, hg.kl_loss, hg.vae_loss, hg.l2_loss, hg.total_loss, hg.output], feed_dict)
        all_recon_loss += recon_loss
        all_kl_loss += kl_loss
        all_vae_loss += vae_loss
        all_l2_loss += l2_loss
        all_total_loss += total_loss
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    auc = roc_auc_score(all_label, all_output)
    recon_loss = all_recon_loss / 283.0
    kl_loss = all_kl_loss / 283.0
    vae_loss = all_vae_loss / 283.0
    l2_loss = all_l2_loss / 283.0
    total_loss = all_total_loss / 283.0
    all_output = np.reshape(all_output, (-1))
    all_label = np.reshape(all_label, (-1))
    ap = average_precision_score(all_label, all_output)
    time_str = datetime.datetime.now().isoformat()
    tempstr = "{}: auc {:g}, ap {:g}, recon_loss {:g}, kl_loss {:g}, vae_loss {:g}, l2_loss {:g}, total_loss {:g}".format(
        time_str, auc, ap, recon_loss, kl_loss, vae_loss, l2_loss, total_loss)
    print(tempstr)
    auc = roc_auc_score(all_label, all_output)
    print "new_auc {:g}".format(auc)

def test_step():
    print 'Testing...'
    all_recon_loss = 0
    all_kl_loss = 0
    all_vae_loss = 0
    all_l2_loss = 0
    all_total_loss = 0
    all_output = []
    all_label = []
    for i in range(int(len(test_order) / 18)):
        input_image = get_data.get_jpg_test(jpg_list, test_order[18 * i:18 * (i + 1)])
        input_nlcd = get_data.get_nlcd(data, test_order[18 * i:18 * (i + 1)])
        input_label = get_data.get_label(data, test_order[18 * i:18 * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        recon_loss, kl_loss, vae_loss, l2_loss, total_loss, output = sess.run(
            [hg.recon_loss, hg.kl_loss, hg.vae_loss, hg.l2_loss, hg.total_loss, hg.output], feed_dict)
        all_recon_loss += recon_loss
        all_kl_loss += kl_loss
        all_vae_loss += vae_loss
        all_l2_loss += l2_loss
        all_total_loss += total_loss
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    auc = roc_auc_score(all_label, all_output)
    recon_loss = all_recon_loss / 283.0
    kl_loss = all_kl_loss / 283.0
    vae_loss = all_vae_loss / 283.0
    l2_loss = all_l2_loss / 283.0
    total_loss = all_total_loss / 283.0
    all_output = np.reshape(all_output, (-1))
    all_label = np.reshape(all_label, (-1))
    ap = average_precision_score(all_label, all_output)
    time_str = datetime.datetime.now().isoformat()
    tempstr = "{}: auc {:g}, ap {:g}, recon_loss {:g}, kl_loss {:g}, vae_loss {:g}, l2_loss {:g}, total_loss {:g}".format(
        time_str, auc, ap, recon_loss, kl_loss, vae_loss, l2_loss, total_loss)
    print(tempstr)
    auc = roc_auc_score(all_label, all_output)
    print "new_auc {:g}".format(auc)

def test_step():
    print 'Testing...'
    all_ce_loss = 0
    all_output = []
    all_label = []
    batch_size = 18
    for i in range(int(len(test_order) / batch_size)):
        input_image = get_data.get_jpg_test(jpg_list, test_order[batch_size * i:batch_size * (i + 1)]) / 128.0
        input_label = get_data.get_label(data, test_order[batch_size * i:batch_size * (i + 1)])
        input_nlcd = get_data.get_nlcd(data, test_order[batch_size * i:batch_size * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.keep_prob] = 1.0
        ce_loss, output = sess.run([hg.ce_loss, hg.output], feed_dict)
        all_ce_loss += ce_loss
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    # average_precision = average_precision_score(all_label,all_output)
    loglike = all_ce_loss / (int(len(test_order) / batch_size))
    np.save('output.npy', all_output)
    np.save('label.npy', all_label)
    auc = roc_auc_score(all_label, all_output)
    # loglike = log_likelihood(all_label,all_output)
    time_str = datetime.datetime.now().isoformat()
    tempstr = "{}: auc {:g}, log_likelihood {:g}".format(time_str, auc, loglike)
    print(tempstr)
    all_output = np.reshape(all_output, (-1))
    all_label = np.reshape(all_label, (-1))
    ap = average_precision_score(all_label, all_output)
    auc_2 = roc_auc_score(all_label, all_output)
    print 'ap:' + str(ap)
    print 'auc_2:' + str(auc_2)

def validation_step(current_step):
    print 'Validating...'
    all_ce_loss = 0
    all_l2_loss = 0
    all_total_loss = 0
    all_output = []
    all_label = []
    valid_batch = 23
    for i in range(int(len(validation_order) / valid_batch)):
        input_image = get_data.get_jpg_test(validation_jpg, validation_order[valid_batch * i:valid_batch * (i + 1)]) / 128.0
        input_label = get_data.get_label(validation_label, validation_order[valid_batch * i:valid_batch * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        ce_loss, l2_loss, total_loss, output = sess.run(
            [hg.ce_loss, hg.l2_loss, hg.total_loss, hg.output], feed_dict)
        all_ce_loss += ce_loss
        all_l2_loss += l2_loss
        all_total_loss += total_loss
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    # auc = roc_auc_score(all_label,all_output)
    ce_loss = all_ce_loss / (4048 / valid_batch)
    l2_loss = all_l2_loss / (4048 / valid_batch)
    total_loss = all_total_loss / (4048 / valid_batch)
    all_output = np.reshape(all_output, (-1))
    all_label = np.reshape(all_label, (-1))
    ap = average_precision_score(all_label, all_output)
    auc = roc_auc_score(all_label, all_output)
    time_str = datetime.datetime.now().isoformat()
    tempstr = "{}: auc {:g}, ap {:g}, ce_loss {:g}, l2_loss {:g}, total_loss {:g}".format(
        time_str, auc, ap, ce_loss, l2_loss, total_loss)
    print(tempstr)
    summary_writer.add_summary(MakeSummary('validation/auc', auc), current_step)
    summary_writer.add_summary(MakeSummary('validation/ap', ap), current_step)
    summary_writer.add_summary(MakeSummary('validation/ce_loss', ce_loss), current_step)
    return ce_loss

def test_step():
    print 'Testing...'
    all_recon_loss = 0
    all_output = []
    all_label = []
    batch_size = FLAGS.batch_size
    for i in range(int(len(test_order) / batch_size)):
        input_nlcd = get_data.get_nlcd(data, test_order[batch_size * i:batch_size * (i + 1)])
        input_image = get_data.get_jpg_test(jpg_list, test_order[batch_size * i:batch_size * (i + 1)]) / 128.0
        input_label = get_data.get_label(data, test_order[batch_size * i:batch_size * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        output = sess.run(hg.output, feed_dict)
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    # recon_loss = all_recon_loss/(5094.0/batch_size)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    # auc = roc_auc_score(all_label,all_output)
    # all_output = np.reshape(all_output,(-1))
    # all_label = np.reshape(all_label,(-1))
    # ap = average_precision_score(all_label,all_output)
    # time_str = datetime.datetime.now().isoformat()
    # new_auc = roc_auc_score(all_label,all_output)
    # tempstr = "{}: auc {:g}, ap {:g}, recon_loss {:g}, new_auc {:g}".format(time_str, auc, ap, recon_loss, new_auc)
    # print(tempstr)
    all_output = np.reshape(all_output, (-1, 100))
    all_label = np.reshape(all_label, (-1, 100))
    return all_output, all_label

def test_step():
    print 'testing...'
    # all_ce_loss = 0
    # all_l2_loss = 0
    # all_total_loss = 0
    # all_output = []
    # all_label = []
    all_feature = []
    batch_size = 17 * 3
    for i in range(int(len(test_order) / batch_size)):
        input_image = get_data.get_jpg_test(jpg_list, test_order[batch_size * i:batch_size * (i + 1)])
        input_label = get_data.get_label(data, test_order[batch_size * i:batch_size * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_image] = input_image
        feature = sess.run(hg.feature, feed_dict)
        for i in feature:
            all_feature.append(i)
        # for i in input_label:
        #     all_label.append(i)
    # all_output = np.array(all_output)
    # all_label = np.array(all_label)
    # average_precision = average_precision_score(all_label,all_output)
    # np.save('output.npy',all_output)
    # np.save('label.npy',all_label)
    # auc = roc_auc_score(all_label,all_output)
    # loglike = log_likelihood(all_label,all_output)
    # time_str = datetime.datetime.now().isoformat()
    # tempstr = "{}: auc {:g}, log_likelihood {:g}".format(time_str, auc, loglike)
    # print(tempstr)
    # all_output = np.reshape(all_output,(-1))
    # all_label = np.reshape(all_label,(-1))
    # ap = average_precision_score(all_label,all_output)
    # auc_2 = roc_auc_score(all_label,all_output)
    # print 'ap:'+str(ap)
    # print 'auc_2:'+str(auc_2)
    np.save('resnet50_feature.npy', all_feature)

def test_step():
    print 'testing...'
    all_ce_loss = 0
    all_l2_loss = 0
    all_total_loss = 0
    all_output = []
    all_label = []
    for i in range(int(len(test_order) / 18)):
        input_image = get_data.get_jpg_test(jpg_list, test_order[18 * i:18 * (i + 1)])
        input_label = get_data.get_label(data, test_order[18 * i:18 * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_image] = input_image
        output = sess.run(hg.output, feed_dict)
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    # average_precision = average_precision_score(all_label,all_output)
    np.save('output.npy', all_output)
    np.save('label.npy', all_label)
    auc = roc_auc_score(all_label, all_output)
    loglike = log_likelihood(all_label, all_output)
    time_str = datetime.datetime.now().isoformat()
    tempstr = "{}: auc {:g}, log_likelihood {:g}".format(time_str, auc, loglike)
    print(tempstr)
    all_output = np.reshape(all_output, (-1))
    all_label = np.reshape(all_label, (-1))
    ap = average_precision_score(all_label, all_output)
    auc_2 = roc_auc_score(all_label, all_output)
    print 'ap:' + str(ap)
    print 'auc_2:' + str(auc_2)

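# log_likelihood is called in the test step above but is not defined in this
# section. A minimal sketch, assuming it is the mean Bernoulli log-likelihood
# of the binary labels under the predicted probabilities (clipped for
# numerical stability); the project's actual definition may differ, e.g. in
# sign or in how it averages over labels versus examples.
def log_likelihood(labels, probs, eps=1e-7):
    # clip probabilities away from 0/1 so the logs stay finite
    probs = np.clip(probs, eps, 1.0 - eps)
    return np.mean(labels * np.log(probs) + (1.0 - labels) * np.log(1.0 - probs))
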
def test_step():
    print 'Testing...'
    # names = ['forest','human','ocean']
    all_sample_z = []
    batch_size = 17
    # for i in range(len(orders)):
    #     input_image = get_data.get_jpg_test(jpg_list,orders[i])/128.0
    #     input_nlcd = get_data.get_nlcd(data,orders[i])
    #     input_label = get_data.get_label(data,orders[i])
    for i in range(int(len(orders) / batch_size)):
        input_image = get_data.get_jpg_test(jpg_list, orders[batch_size * i:batch_size * (i + 1)]) / 128.0
        input_nlcd = get_data.get_nlcd(data, orders[batch_size * i:batch_size * (i + 1)])
        input_label = get_data.get_label(data, orders[batch_size * i:batch_size * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        sample_z = sess.run(hg.sample_z, feed_dict)
        all_sample_z.append(sample_z)
    all_sample_z = np.array(all_sample_z)
    all_sample_z = np.reshape(all_sample_z, (-1, 100))
    np.save('sample_z.npy', all_sample_z)

def test_step():
    print 'Testing...'
    # names = ['forest','human','ocean']
    all_sample_z = []
    batch_size = 17
    # for i in range(len(orders)):
    #     input_image = get_data.get_jpg_test(jpg_list,orders[i])/128.0
    #     input_nlcd = get_data.get_nlcd(data,orders[i])
    #     input_label = get_data.get_label(data,orders[i])
    for i in range(int(len(orders) / batch_size)):
        input_image = get_data.get_jpg_test(jpg_list, orders[batch_size * i:batch_size * (i + 1)])
        input_nlcd = get_data.get_nlcd(data, orders[batch_size * i:batch_size * (i + 1)])
        input_label = get_data.get_label(data, orders[batch_size * i:batch_size * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        # sample_z = sess.run(hg.sample_z,feed_dict)
        sample_z = sess.run(hg.condition_miu, feed_dict)
        all_sample_z.append(sample_z)
    all_sample_z = np.array(all_sample_z)
    all_sample_z = np.reshape(all_sample_z, (-1, 100))
    np.save('condition_miu.npy', all_sample_z)

def test_step():
    print 'Testing...'
    all_recon_loss = 0
    all_output = []
    all_label = []
    batch_size = FLAGS.batch_size
    for i in range(int(len(test_order) / batch_size)):
        input_image = get_data.get_jpg_test(test_jpg, test_order[batch_size * i:batch_size * (i + 1)]) / 128.0
        input_label = get_data.get_label(test_label, test_order[batch_size * i:batch_size * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.keep_prob] = 1.0
        output = sess.run(hg.output, feed_dict)
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    all_output = np.reshape(all_output, (-1, 17))
    all_label = np.reshape(all_label, (-1, 17))
    return all_output, all_label

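# Usage sketch for the test_step variant above, which returns the stacked
# (all_output, all_label) arrays instead of printing metrics: the caller can
# compute the same scores the other snippets report. Illustrative only; the
# surrounding project may consume the returned arrays differently.
all_output, all_label = test_step()
print 'auc: ' + str(roc_auc_score(all_label, all_output))
print 'ap: ' + str(average_precision_score(np.reshape(all_label, (-1)), np.reshape(all_output, (-1))))
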
def validation_step():
    print 'Validating...'
    all_recon_loss = 0
    all_kl_loss = 0
    all_vae_loss = 0
    all_l2_loss = 0
    all_total_loss = 0
    all_output = []
    all_label = []
    for i in range(int(len(validation_order) / 18)):
        input_image = get_data.get_jpg_test(jpg_list, validation_order[18 * i:18 * (i + 1)])
        input_nlcd = get_data.get_nlcd(data, validation_order[18 * i:18 * (i + 1)])
        input_label = get_data.get_label(data, validation_order[18 * i:18 * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.keep_prob] = 1.0
        recon_loss, kl_loss, vae_loss, l2_loss, total_loss, output = sess.run(
            [hg.recon_loss, hg.kl_loss, hg.vae_loss, hg.l2_loss, hg.total_loss, hg.output], feed_dict)
        all_recon_loss += recon_loss
        all_kl_loss += kl_loss
        all_vae_loss += vae_loss
        all_l2_loss += l2_loss
        all_total_loss += total_loss
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    # average_precision = average_precision_score(all_label,all_output)
    auc = roc_auc_score(all_label, all_output)
    recon_loss = all_recon_loss / 283.0
    kl_loss = all_kl_loss / 283.0
    vae_loss = all_vae_loss / 283.0
    l2_loss = all_l2_loss / 283.0
    total_loss = all_total_loss / 283.0
    all_output = np.reshape(all_output, (-1))
    all_label = np.reshape(all_label, (-1))
    ap = average_precision_score(all_label, all_output)
    time_str = datetime.datetime.now().isoformat()
    tempstr = "{}: auc {:g}, ap {:g}, recon_loss {:g}, kl_loss {:g}, vae_loss {:g}, l2_loss {:g}, total_loss {:g}".format(
        time_str, auc, ap, recon_loss, kl_loss, vae_loss, l2_loss, total_loss)
    print(tempstr)
    summary_writer.add_summary(MakeSummary('validation/auc', auc), current_step)
    summary_writer.add_summary(MakeSummary('validation/ap', ap), current_step)
    summary_writer.add_summary(MakeSummary('validation/recon_loss', recon_loss), current_step)
    summary_writer.add_summary(MakeSummary('validation/kl_loss', kl_loss), current_step)
    summary_writer.add_summary(MakeSummary('validation/vae_loss', vae_loss), current_step)
    return vae_loss

def main(_):
    print 'reading npy...'
    data = np.load('../1st.npy')
    jpg_list = np.load('128bin.npy')
    train_order = np.load('../train.npy')
    validation_order = np.load('../validation.npy')
    one_epoch_iter = len(train_order) / FLAGS.batch_size
    print 'reading finished'

    sess = tf.Session()

    print 'building network...'
    hg = vae.vae(is_training=True)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step,
                                               0.5 * FLAGS.max_epoch * len(train_order) / FLAGS.batch_size,
                                               1.0, staircase=True)
    tf.summary.scalar('learning_rate', learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(hg.total_loss, global_step=global_step)
    merged_summary = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
    sess.run(tf.initialize_all_variables())
    saver = tf.train.Saver(max_to_keep=None)
    print 'building finished'

    #def train_step(input_nlcd,input_label,smooth_ce_loss,smooth_l2_loss,smooth_total_loss):
    def train_step(input_label, input_nlcd, input_image):
        feed_dict = {}
        feed_dict[hg.input_nlcd] = input_nlcd
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.keep_prob] = 0.8
        temp, step, recon_loss, kl_loss, vae_loss, l2_loss, total_loss, summary, output = sess.run(
            [train_op, global_step, hg.recon_loss, hg.kl_loss, hg.vae_loss,
             hg.l2_loss, hg.total_loss, merged_summary, hg.output], feed_dict)
        time_str = datetime.datetime.now().isoformat()
        summary_writer.add_summary(summary, step)
        return output, recon_loss, kl_loss, vae_loss, l2_loss, total_loss

    def validation_step():
        print 'Validating...'
        all_recon_loss = 0
        all_kl_loss = 0
        all_vae_loss = 0
        all_l2_loss = 0
        all_total_loss = 0
        all_output = []
        all_label = []
        for i in range(int(len(validation_order) / 18)):
            input_image = get_data.get_jpg_test(jpg_list, validation_order[18 * i:18 * (i + 1)])
            input_nlcd = get_data.get_nlcd(data, validation_order[18 * i:18 * (i + 1)])
            input_label = get_data.get_label(data, validation_order[18 * i:18 * (i + 1)])
            feed_dict = {}
            feed_dict[hg.input_nlcd] = input_nlcd
            feed_dict[hg.input_label] = input_label
            feed_dict[hg.input_image] = input_image
            feed_dict[hg.keep_prob] = 1.0
            recon_loss, kl_loss, vae_loss, l2_loss, total_loss, output = sess.run(
                [hg.recon_loss, hg.kl_loss, hg.vae_loss, hg.l2_loss, hg.total_loss, hg.output], feed_dict)
            all_recon_loss += recon_loss
            all_kl_loss += kl_loss
            all_vae_loss += vae_loss
            all_l2_loss += l2_loss
            all_total_loss += total_loss
            for i in output:
                all_output.append(i)
            for i in input_label:
                all_label.append(i)
        all_output = np.array(all_output)
        all_label = np.array(all_label)
        # average_precision = average_precision_score(all_label,all_output)
        auc = roc_auc_score(all_label, all_output)
        recon_loss = all_recon_loss / 283.0
        kl_loss = all_kl_loss / 283.0
        vae_loss = all_vae_loss / 283.0
        l2_loss = all_l2_loss / 283.0
        total_loss = all_total_loss / 283.0
        all_output = np.reshape(all_output, (-1))
        all_label = np.reshape(all_label, (-1))
        ap = average_precision_score(all_label, all_output)
        time_str = datetime.datetime.now().isoformat()
        tempstr = "{}: auc {:g}, ap {:g}, recon_loss {:g}, kl_loss {:g}, vae_loss {:g}, l2_loss {:g}, total_loss {:g}".format(
            time_str, auc, ap, recon_loss, kl_loss, vae_loss, l2_loss, total_loss)
        print(tempstr)
        summary_writer.add_summary(MakeSummary('validation/auc', auc), current_step)
        summary_writer.add_summary(MakeSummary('validation/ap', ap), current_step)
        summary_writer.add_summary(MakeSummary('validation/recon_loss', recon_loss), current_step)
        summary_writer.add_summary(MakeSummary('validation/kl_loss', kl_loss), current_step)
        summary_writer.add_summary(MakeSummary('validation/vae_loss', vae_loss), current_step)
        return vae_loss

    best_vae_loss = 10000
    best_iter = 0
    smooth_recon_loss = 0.0
    smooth_kl_loss = 0.0
    smooth_vae_loss = 0.0
    smooth_l2_loss = 0.0
    smooth_total_loss = 0.0
    temp_label = []
    temp_output = []

    for one_epoch in range(FLAGS.max_epoch):
        print('epoch ' + str(one_epoch + 1) + ' starts!')
        np.random.shuffle(train_order)
        for i in range(int(len(train_order) / float(FLAGS.batch_size))):
            start = i * FLAGS.batch_size
            end = (i + 1) * FLAGS.batch_size
            input_image = get_data.get_jpg_test(jpg_list, train_order[start:end])
            input_nlcd = get_data.get_nlcd(data, train_order[start:end])
            input_label = get_data.get_label(data, train_order[start:end])
            output, recon_loss, kl_loss, vae_loss, l2_loss, total_loss = train_step(input_label, input_nlcd, input_image)
            smooth_recon_loss += recon_loss
            smooth_kl_loss += kl_loss
            smooth_vae_loss += vae_loss
            smooth_l2_loss += l2_loss
            smooth_total_loss += total_loss
            temp_label.append(input_label)
            temp_output.append(output)
            current_step = tf.train.global_step(sess, global_step)

            if current_step % 10 == 0:
                recon_loss = smooth_recon_loss / 10.0
                kl_loss = smooth_kl_loss / 10.0
                vae_loss = smooth_vae_loss / 10.0
                l2_loss = smooth_l2_loss / 10.0
                total_loss = smooth_total_loss / 10.0
                temp_output = np.reshape(np.array(temp_output), (-1))
                temp_label = np.reshape(np.array(temp_label), (-1))
                ap = average_precision_score(temp_label, temp_output)
                temp_output = np.reshape(temp_output, (-1, 100))
                temp_label = np.reshape(temp_label, (-1, 100))
                try:
                    auc = roc_auc_score(temp_label, temp_output)
                except ValueError:
                    print 'ytrue error for auc'
                else:
                    time_str = datetime.datetime.now().isoformat()
                    tempstr = "{}: step {}, auc {:g}, ap {:g}, recon_loss {:g}, kl_loss {:g}, vae_loss {:g}, l2_loss {:g}, total_loss {:g}".format(
                        time_str, current_step, auc, ap, recon_loss, kl_loss, vae_loss, l2_loss, total_loss)
                    print(tempstr)
                    summary_writer.add_summary(MakeSummary('train/auc', auc), current_step)
                    summary_writer.add_summary(MakeSummary('train/ap', ap), current_step)
                temp_output = []
                temp_label = []
                smooth_recon_loss = 0
                smooth_kl_loss = 0
                smooth_vae_loss = 0
                smooth_l2_loss = 0
                smooth_total_loss = 0

            if current_step % int(one_epoch_iter * FLAGS.save_epoch) == 0:
                vae_loss = validation_step()
                if vae_loss < best_vae_loss:
                    print 'the current vae_loss is better than the previous best one!'
                    best_vae_loss = vae_loss
                    best_iter = current_step
                    print 'saving model'
                    path = saver.save(sess, FLAGS.model_dir + 'fc_model', global_step=current_step)
                    print 'have saved model to ' + path

    print 'training has finished!'
    print 'the best vae_loss on validation is ' + str(best_vae_loss)
    print 'the best checkpoint is ' + str(best_iter)

def validation_step():
    print 'Validating...'
    all_recon_loss = 0
    all_kl_loss = 0
    all_vae_loss = 0
    all_l2_loss = 0
    all_total_loss = 0
    all_output = []
    all_label = []
    valid_batch = 23
    for i in range(int(len(validation_order) / valid_batch)):
        input_image = get_data.get_jpg_test(validation_jpg, validation_order[valid_batch * i:valid_batch * (i + 1)])
        input_label = get_data.get_label(validation_label, validation_order[valid_batch * i:valid_batch * (i + 1)])
        feed_dict = {}
        feed_dict[hg.input_label] = input_label
        feed_dict[hg.input_image] = input_image
        feed_dict[hg.keep_prob] = 1.0
        recon_loss, kl_loss, vae_loss, l2_loss, total_loss, output = sess.run(
            [hg.recon_loss, hg.kl_loss, hg.vae_loss, hg.l2_loss, hg.total_loss, hg.output], feed_dict)
        all_recon_loss += recon_loss
        all_kl_loss += kl_loss
        all_vae_loss += vae_loss
        all_l2_loss += l2_loss
        all_total_loss += total_loss
        for i in output:
            all_output.append(i)
        for i in input_label:
            all_label.append(i)
    all_output = np.array(all_output)
    all_label = np.array(all_label)
    # average_precision = average_precision_score(all_label,all_output)
    recon_loss = all_recon_loss / (4048 / valid_batch)
    kl_loss = all_kl_loss / (4048 / valid_batch)
    vae_loss = all_vae_loss / (4048 / valid_batch)
    l2_loss = all_l2_loss / (4048 / valid_batch)
    total_loss = all_total_loss / (4048 / valid_batch)
    all_output = np.reshape(all_output, (-1))
    all_label = np.reshape(all_label, (-1))
    ap = average_precision_score(all_label, all_output)
    auc = roc_auc_score(all_label, all_output)
    time_str = datetime.datetime.now().isoformat()
    tempstr = "{}: auc {:g}, ap {:g}, recon_loss {:g}, kl_loss {:g}, vae_loss {:g}, l2_loss {:g}, total_loss {:g}".format(
        time_str, auc, ap, recon_loss, kl_loss, vae_loss, l2_loss, total_loss)
    print(tempstr)
    summary_writer.add_summary(MakeSummary('validation/auc', auc), current_step)
    summary_writer.add_summary(MakeSummary('validation/ap', ap), current_step)
    summary_writer.add_summary(MakeSummary('validation/recon_loss', recon_loss), current_step)
    summary_writer.add_summary(MakeSummary('validation/kl_loss', kl_loss), current_step)
    summary_writer.add_summary(MakeSummary('validation/vae_loss', vae_loss), current_step)
    return vae_loss