Example #1
def run():
    net = network.bn_inception(pretrained=True)
    embed.embed(net, sz_embedding=cfg.EMBEDDING_WIDTH, normalize_output=True)
    if cfg.USE_CUDA == 1:
        net.cuda()
    print("Load model params")
    net.load_state_dict(
        torch.load(cfg.MODEL_PATH + str(cfg.MARGIN) +
                   str(cfg.EMBEDDING_WIDTH) + str(cfg.DATASET) +
                   str(cfg.METRIC_LOSS_PARAM) + 'x' + str(cfg.METHOD) +
                   str(cfg.CE_LOSS_PARAM) + 'x' + str(cfg.SOFTMAX_METHOD) +
                   str(cfg.K) + str(cfg.POS_SAMPLE_NUM) + ".pkl"))

    print("Index all dataset")
    preprocess = Preprocess(root=cfg.DATA_ROOT,
                            use_cuda=cfg.USE_CUDA,
                            test_batch_size=cfg.TEST_BATCH_SIZE,
                            method=cfg.METHOD,
                            dataset_name=cfg.DATASET,
                            with_bounding_box=cfg.WITH_BOUNDING_BOX,
                            download=cfg.DOWNLOAD)
    print("Done!")
    if cfg.METHOD == 0:
        metric = 'cosine'
    else:
        metric = 'euclidean'
    print("embd_size=", cfg.EMBEDDING_WIDTH, "dataset=", cfg.DATASET)

    if cfg.METHOD == 0:
        print("tau=", cfg.TAU, "K=", cfg.K, "N=", cfg.N, "N+=",
              cfg.POS_SAMPLE_NUM, "embd_width=", cfg.EMBEDDING_WIDTH,
              "batch_size=", cfg.BATCH_SIZE, 'margin=', cfg.MARGIN)
    print("softmax_rate:", cfg.CE_LOSS_PARAM, "metric_rate:",
          cfg.METRIC_LOSS_PARAM)

    preck_test, recallk_test, X1, T1, path1 = val.test(net,
                                                       preprocess.test_loader,
                                                       cfg.TEST_K, metric)
    preck_train, recallk_train, X2, T2, path2 = val.test(
        net, preprocess.test_train_loader, cfg.TEST_K, metric)

    data = X1.cpu().numpy()
    label = T1.cpu().numpy()
    n_samples = len(path1)
    n_features = 64
    X2 = X2.cpu().numpy()  # train-set embeddings (not visualized below)

    print('recall@1 in test set: ', str(recallk_test[0]))
    print('recall@1 in train set: ', str(recallk_train[0]))

    print('Computing t-SNE embedding')
    tsne = TSNE(n_components=2, init='pca', random_state=0)

    result = tsne.fit_transform(data)

    fig = plot_embedding(result, path1)
    fig.savefig("str(cfg.DATASET)+tsne.jpg")
Example #2
def health_from_file(file_path):
    with open(file_path) as file:
        text = file.readlines()

    # Reduce logging output.
    tf.logging.set_verbosity(tf.logging.ERROR)

    with tf.Session() as session:
        session.run(
            [tf.global_variables_initializer(),
             tf.tables_initializer()])
        ave_embed = tf.reduce_mean(embed(text), 0)
        happy_stat = similarity(happy, ave_embed) - similarity(sad, ave_embed)
        feminine_stat = similarity(feminine, ave_embed) - similarity(
            masculine, ave_embed)
        wealthy_stat = similarity(wealthy, ave_embed) - similarity(
            poor, ave_embed)
        yin_stat = similarity(yin, ave_embed) - similarity(yang, ave_embed)
        liberal_stat = similarity(liberal, ave_embed) - similarity(
            conservative, ave_embed)
        future_stat = similarity(future, ave_embed) - similarity(
            past, ave_embed)
        anger_stat = similarity(anger, ave_embed) - similarity(
            peace, ave_embed)
        stat = tf.stack([
            happy_stat, feminine_stat, wealthy_stat, yin_stat, liberal_stat,
            future_stat, anger_stat
        ])
        computed_stat = session.run(stat)
        print("happy feminine wealthy yin liberal future anger")
        print(computed_stat)
        return computed_stat
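The `embed` and `similarity` helpers, and the attribute tensors (`happy`, `sad`, `feminine`, ...), are defined outside this snippet. A plausible sketch follows, assuming `embed` is a TF-Hub sentence encoder and `similarity` is cosine similarity between embedding vectors; the module URL and the seed-word lists are illustrative assumptions:

import tensorflow as tf
import tensorflow_hub as hub

# Assumed encoder; any TF1 hub text-embedding module would fit this usage.
embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2")

def similarity(a, b):
    # Cosine similarity between two embedding vectors.
    a = tf.nn.l2_normalize(a, axis=-1)
    b = tf.nn.l2_normalize(b, axis=-1)
    return tf.reduce_sum(a * b)

# Each attribute pole is the mean embedding of a small seed-word list
# (illustrative words; the original lists are not shown).
happy = tf.reduce_mean(embed(["happy", "joyful", "cheerful"]), 0)
sad = tf.reduce_mean(embed(["sad", "gloomy", "miserable"]), 0)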
Example #3
def run(flag):
    print(cfg.NET)

    if cfg.NET == "bn_inception_v2":
        net_id = 1
    if cfg.NET == "densenet201":
        net_id = 0
    if "CUB" in cfg.DATASET:
        dataset_id = 0
    if "CAR" in cfg.DATASET:
        dataset_id = 1
    if "Online" in cfg.DATASET:
        dataset_id = 2
    prefix = "model_param/paper_result/"
    net_list = ["densenet201_", "bn_inception_v2_"]
    dataset_list = ["cub200_2011_", "cars196_", "online_"]
    method_list = [
        "angular", "contrastive", "hardmining", "lifted", "npair", "preck",
        "proxynca", "samplingmatters", "semihard", "triplet"
    ]
    preck_test = [0, 0, 0, 0, 0, 0, 0]
    recallk_test = [0, 0, 0, 0, 0, 0, 0]
    nmi, mAP, PR, ROC, F1 = 0, 0, 0, 0, 0

    name = (prefix + net_list[net_id] + dataset_list[dataset_id] +
            method_list[flag])
    print(name)
    if cfg.METHOD == 0 or cfg.METHOD == 8:
        metric = 'cosine'
    else:
        metric = 'euclidean'

    if not os.path.exists(name + '.npz'):
        if cfg.NET == "bn_inception_v2":
            net = network.bn_inception(pretrained=True)
        elif cfg.NET == "densenet201":
            net = model.densenet201(pretrained=True)
        embed.embed(net,
                    sz_embedding=cfg.EMBEDDING_WIDTH,
                    normalize_output=True,
                    net_id=cfg.NET)

        if cfg.USE_CUDA == 1:
            net.cuda()
        print("Load model params")
        #print(cfg.MODEL_PATH+str(cfg.MARGIN)+str(cfg.EMBEDDING_WIDTH)+str(cfg.DATASET)+str(cfg.METRIC_LOSS_PARAM)+'x'+str(cfg.METHOD)+str(cfg.CE_LOSS_PARAM)+'x'+str(cfg.SOFTMAX_METHOD)+str(cfg.K)+str(cfg.POS_SAMPLE_NUM)+".pkl")
        #net.load_state_dict(torch.load(cfg.MODEL_PATH+str(cfg.MARGIN)+str(cfg.EMBEDDING_WIDTH)+str(cfg.DATASET)+str(cfg.METRIC_LOSS_PARAM)+'x'+str(cfg.METHOD)+str(cfg.CE_LOSS_PARAM)+'x'+str(cfg.SOFTMAX_METHOD)+str(cfg.K)+str(cfg.POS_SAMPLE_NUM)+".pkl"))

        net.load_state_dict(torch.load(name + ".pkl"))

        print("Index all dataset")
        preprocess = Preprocess(root=cfg.DATA_ROOT,
                                use_cuda=cfg.USE_CUDA,
                                train_batch_size=cfg.BATCH_SIZE,
                                test_batch_size=cfg.TEST_BATCH_SIZE,
                                method=cfg.METHOD,
                                dataset_name=cfg.DATASET,
                                with_bounding_box=cfg.WITH_BOUNDING_BOX,
                                download=cfg.DOWNLOAD,
                                n_pos=cfg.POS_SAMPLE_NUM,
                                N=cfg.N)
        print("Done!")

        print("embd_size=", cfg.EMBEDDING_WIDTH, "dataset=", cfg.DATASET)

        if cfg.METHOD == 0 or cfg.METHOD == 8:
            print("tau=", cfg.TAU, "K=", cfg.K, "N=", cfg.N, "N+=",
                  cfg.POS_SAMPLE_NUM, "embd_width=", cfg.EMBEDDING_WIDTH,
                  "batch_size=", cfg.BATCH_SIZE, 'margin=', cfg.MARGIN)

        X1, T1, path1 = val.inference(net, preprocess.test_loader)
        #X2, T2, path2=val.inference(net,preprocess.test_train_loader)
        print("inference finished!")
        X1 = X1.numpy()
        T1 = T1.numpy()
        np.savez(name, X1, T1, path1)
    else:
        npzfile = np.load(name + '.npz')
        X1 = npzfile['arr_0']
        T1 = npzfile['arr_1']
        path1 = npzfile['arr_2']

    X1 = torch.from_numpy(X1)
    T1 = torch.from_numpy(T1)

    #val.get_some_case(X1, T1, path1, 10, metric)
    preck_test, recallk_test = val.test(X1, T1, cfg.TEST_K, metric)
    #print("prec@K:",preck_test,"recall@K:",recallk_test)
    #preck_train,recallk_train=val.test(X2, T2,cfg.TEST_K,metric)
    #print("prec@K:",preck_train,"recall@K:",recallk_train)

    nmi, mAP, PR, ROC, F1 = val.test_some_scores(X1, T1, path1, metric,
                                                 cfg.TEST_CLASS, name)

    # Assemble one LaTeX table row: prec@K, recall@K, NMI, mAP, F1.
    cells = [name,
             '%.2f' % preck_test[0], '%.2f' % preck_test[1],
             '%.2f' % preck_test[2], '%.2f' % preck_test[3],
             '%.2f' % recallk_test[1], '%.2f' % recallk_test[2],
             '%.2f' % recallk_test[3],
             '%.2f' % (nmi * 100), '%.2f' % (mAP * 100), '%.2f' % (F1 * 100)]
    show_result = "&".join(cells) + "\\\\"
    print(show_result)
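Since `flag` simply indexes `method_list`, sweeping every method for the configured network and dataset is one loop. A usage sketch, assuming `cfg` is already configured as above:

if __name__ == "__main__":
    # method_list has ten entries, "angular" through "triplet".
    for flag in range(10):
        run(flag)  # prints one LaTeX result row per method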
Example #4
def run(cfg):
	print(cfg.NET)
	if cfg.NET=="bn_inception_v2":
		net = network.bn_inception(pretrained = True)
	if cfg.NET=="densenet201":
		net = model.densenet201(pretrained = True)
	embed.embed(net, sz_embedding=cfg.EMBEDDING_WIDTH,normalize_output = True, net_id = cfg.NET)

	if cfg.USE_CUDA==1: 
		net.cuda()
	metric_loss = get_loss(n_input=cfg.N, k=cfg.K, tau=cfg.TAU,
	                       n_pos=cfg.POS_SAMPLE_NUM, margin=cfg.MARGIN,
	                       input_dim=cfg.EMBEDDING_WIDTH,
	                       output_dim=cfg.TRAIN_CLASS,
	                       batch_size=cfg.BATCH_SIZE,
	                       method=cfg.METHOD).cuda()
	softmax_loss = get_loss(input_dim=cfg.EMBEDDING_WIDTH,
	                        output_dim=cfg.TRAIN_CLASS,
	                        margin=cfg.SOFTMAX_MARGIN,
	                        method=cfg.SOFTMAX_METHOD).cuda()

	optimizer = torch.optim.Adam(
    [
        { # embedding parameters
            'params': net.embedding_layer.parameters(), 
            'lr' : cfg.EMBD_LR
        },
        { # softmax loss parameters
            'params': softmax_loss.parameters(), 
            'lr': cfg.SOFTMAX_LOSS_LR
        },
        { # architecture parameters, excluding embedding layer
            'params': list(
                set(
                    net.parameters()
                ).difference(
                    set(net.embedding_layer.parameters())
                )
            ), 
            'lr' : cfg.NET_LR
        },
        { # metric loss parameters
            'params': metric_loss.parameters(), 
            'lr': cfg.METRIC_LOSS_LR
        },
    ],
    eps = 1e-2,
    weight_decay = cfg.WEIGHT_DECAY
)
	#for i in metric_loss.named_parameters():
	#	print(i)
	model_name = (cfg.MODEL_PATH + str(cfg.MARGIN) + str(cfg.EMBEDDING_WIDTH) +
	              str(cfg.METRIC_LOSS_PARAM) + 'x' + str(cfg.METHOD) +
	              str(cfg.CE_LOSS_PARAM) + 'x' + str(cfg.SOFTMAX_METHOD) +
	              str(cfg.K) + str(cfg.POS_SAMPLE_NUM) + ".pkl")
	scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, cfg.SCHEDULER_STEP, gamma = cfg.GAMMA)    #[10,30,70]
	print("metric_rate:",cfg.METRIC_LOSS_PARAM,"softmax_rate:",cfg.CE_LOSS_PARAM)
	if cfg.TRAINING_OLD==1:
		print("Load model params")
		net.load_state_dict(torch.load(model_name+"62.95"))

	preprocess = Preprocess(root=cfg.DATA_ROOT,
	                        use_cuda=cfg.USE_CUDA,
	                        train_batch_size=cfg.BATCH_SIZE,
	                        test_batch_size=cfg.TEST_BATCH_SIZE,
	                        method=cfg.METHOD,
	                        dataset_name=cfg.DATASET,
	                        with_bounding_box=cfg.WITH_BOUNDING_BOX,
	                        download=cfg.DOWNLOAD,
	                        n_pos=cfg.POS_SAMPLE_NUM,
	                        N=cfg.N)
	if cfg.METHOD==0 or cfg.METHOD==8:
		metric = 'cosine'
	else:
		metric = 'euclidean'
	print("embd_size=",cfg.EMBEDDING_WIDTH,"dataset=",cfg.DATASET,"batch_size=",cfg.BATCH_SIZE,"GPU ID=",cfg.GPU_NUM)
	print("EMBD_LR=",cfg.EMBD_LR,"SOFTMAX_LOSS_LR=",cfg.SOFTMAX_LOSS_LR,"NET_LR=",cfg.NET_LR)
	if cfg.METHOD==0:
		print("tau=",cfg.TAU,"K=",cfg.K,"N=",cfg.N,"N+=",cfg.POS_SAMPLE_NUM,"batch_size=",cfg.BATCH_SIZE,'margin=',cfg.MARGIN)
	
	run_num=0
	sparsity=0
	err_pos=0
	old_err_pos=0
	total_sparsity=0
	total_err_pos=0
	totalEpochLoss=0
	flag=0
	#X1, T1, _=val.inference(net,preprocess.test_loader)
	#save_log(X1,T1,metric,run_num,totalEpochLoss,total_sparsity,net,model_name)
	for epoch in range(cfg.EPOCH):
		# train
		scheduler.step()
		train_loader=iter(preprocess.train_loader)
		
		for _ in tqdm(range(len(preprocess.train_loader))):
			if run_num%cfg.SHOW_PER_ITER==cfg.SHOW_PER_ITER-1:
				X1=0
				T1=0
				X1, T1, _=val.inference(net,preprocess.test_loader)
				#X2, T2, _=val.inference(net,preprocess.test_train_loader)
				#
				if cfg.MULTI_THREAD == 1:
					# Log in a background thread so training is not blocked.
					t = threading.Thread(target=save_log,
					                     args=(X1, T1, metric, run_num, totalEpochLoss, total_err_pos, net, model_name))
					t.daemon = True
					t.start()
				else:
					save_log(X1,T1,metric,run_num,totalEpochLoss,total_sparsity,net,model_name)
				totalEpochLoss=0

			batch_img, real_y, img_name = next(train_loader)

			optimizer.zero_grad()
			out=net(batch_img.cuda())
			loss_metric,err_pos,sparsity=metric_loss(out,real_y.cuda())
			# Only the metric loss is used here; the weighted combination with the
			# softmax loss (as in Example #5) is left commented out.
			loss = loss_metric  # cfg.METRIC_LOSS_PARAM*loss_metric + cfg.CE_LOSS_PARAM*softmax_loss(out, real_y.cuda())

			total_err_pos=err_pos
			totalEpochLoss=totalEpochLoss+loss.data/cfg.SHOW_PER_ITER
			if not math.isnan(loss.data):
				loss.backward()
				optimizer.step()
			else:
				print(loss.data)
				
			run_num+=1
		print("\r\nEpoch:",epoch,"tau:",cfg.TAU)
Example #5
def run():
	net = network.bn_inception(pretrained = True)
	embed.embed(net, sz_embedding=cfg.EMBEDDING_WIDTH,normalize_output = True)
	if cfg.USE_CUDA==1: 
		net.cuda()
	metric_loss = get_loss(n_input=cfg.N, k=cfg.K, tau=cfg.TAU,
	                       n_pos=cfg.POS_SAMPLE_NUM, margin=cfg.MARGIN,
	                       input_dim=cfg.EMBEDDING_WIDTH,
	                       output_dim=cfg.TRAIN_CLASS,
	                       batch_size=cfg.BATCH_SIZE,
	                       method=cfg.METHOD)
	softmax_loss = get_loss(input_dim=cfg.EMBEDDING_WIDTH,
	                        output_dim=cfg.TRAIN_CLASS,
	                        margin=cfg.SOFTMAX_MARGIN,
	                        method=cfg.SOFTMAX_METHOD)
	
	optimizer = torch.optim.Adam(
    [
        { # embedding parameters
            'params': net.embedding_layer.parameters(), 
            'lr' : cfg.EMBD_LR
        },
        { # softmax loss parameters
            'params': softmax_loss.parameters(), 
            'lr': cfg.SOFTMAX_LOSS_LR
        },
        { # inception parameters, excluding embedding layer
            'params': list(
                set(
                    net.parameters()
                ).difference(
                    set(net.embedding_layer.parameters())
                )
            ), 
            'lr' : cfg.NET_LR
        }
    ],
    eps = 1e-2,
    weight_decay = cfg.WEIGHT_DECAY
)
	
	scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [3, 10, 16],gamma = cfg.GAMMA)
	
	if cfg.TRAINING_OLD==1:
		print("Load model params")
		net.load_state_dict(
			torch.load(cfg.MODEL_PATH + str(cfg.MARGIN) +
			           str(cfg.EMBEDDING_WIDTH) + str(cfg.DATASET) +
			           str(cfg.METRIC_LOSS_PARAM) + 'x' + str(cfg.METHOD) +
			           str(cfg.CE_LOSS_PARAM) + 'x' + str(cfg.SOFTMAX_METHOD) +
			           str(cfg.K) + str(cfg.POS_SAMPLE_NUM) + ".pkl"))

	preprocess = Preprocess(root=cfg.DATA_ROOT,
	                        use_cuda=cfg.USE_CUDA,
	                        test_batch_size=cfg.TEST_BATCH_SIZE,
	                        method=cfg.METHOD,
	                        dataset_name=cfg.DATASET,
	                        with_bounding_box=cfg.WITH_BOUNDING_BOX,
	                        download=cfg.DOWNLOAD)
	print("Done!")
	if cfg.METHOD==0:
		metric = 'cosine'
	else:
		metric = 'euclidean'
	print("embd_size=",cfg.EMBEDDING_WIDTH,"dataset=",cfg.DATASET)
	if cfg.METHOD==0:
		print("tau=",cfg.TAU,"K=",cfg.K,"N=",cfg.N,"N+=",cfg.POS_SAMPLE_NUM,"embd_width=",cfg.EMBEDDING_WIDTH,"batch_size=",cfg.BATCH_SIZE,'margin=',cfg.MARGIN)
	print("softmax_rate:",cfg.CE_LOSS_PARAM,"metric_rate:",cfg.METRIC_LOSS_PARAM)
	run_num=0
	sparsity=0
	err_pos=0
	old_err_pos=0
	total_sparsity=0
	total_err_pos=0

	for epoch in range(cfg.EPOCH):
		# train
		scheduler.step()
		totalEpochLoss=0
		if cfg.METHOD==0:
			iter_num=int(cfg.DATASET_NUM/(cfg.N+1)/cfg.BATCH_SIZE)
		if cfg.METHOD==1:
			iter_num=int(cfg.DATASET_NUM/cfg.BATCH_SIZE)	
		if cfg.METHOD in (2, 3, 4, 5, 6, 7):
			iter_num=int(cfg.DATASET_NUM/cfg.BATCH_SIZE/2)
		for _ in tqdm(range(iter_num)):
			batch_img,y_,real_y=preprocess.next_train_batch(cfg.POS_SAMPLE_NUM,cfg.N, cfg.BATCH_SIZE)
			optimizer.zero_grad()
			if cfg.USE_CUDA==1:
				out=net(batch_img.cuda())
			else:
				out=net(batch_img)
#*****************************************************for prec@k loss******************************************#
			if cfg.METHOD==0:	
				if cfg.USE_CUDA==1:
					loss_metric,sparsity,old_err_pos=metric_loss(out,y_.cuda())
					loss=cfg.METRIC_LOSS_PARAM*loss_metric+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y.cuda())
				else:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(out,y_)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y)
#*****************************************************for angular loss******************************************#
			if cfg.METHOD==4:
				embed1=out[0:cfg.BATCH_SIZE,:]
				embed2=out[cfg.BATCH_SIZE:2*cfg.BATCH_SIZE,:]
				if cfg.USE_CUDA==1:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(embed1,embed2)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y.cuda())
				else:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(embed1,embed2)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y)
#*****************************************************for cluster loss******************************************#
			if cfg.METHOD==2:
				embed1=out[0:cfg.BATCH_SIZE,:]
				embed2=out[cfg.BATCH_SIZE:2*cfg.BATCH_SIZE,:]
				if cfg.USE_CUDA==1:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(out,real_y.cuda())+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y.cuda())
				else:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(out,real_y)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y)
#*****************************************************for npair loss******************************************#
			if cfg.METHOD==3:
				embed1=out[0:cfg.BATCH_SIZE,:]
				embed2=out[cfg.BATCH_SIZE:2*cfg.BATCH_SIZE,:]
				if cfg.USE_CUDA==1:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(embed1,embed2)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y.cuda())
				else:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(embed1,embed2)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y)
#*****************************************************for lifted loss******************************************#
			if cfg.METHOD==5:
				if cfg.USE_CUDA==1:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(out,y_.cuda())+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y.cuda())
				else:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(out,y_)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y)
#*****************************************************for triplet loss******************************************#
			if cfg.METHOD==6:
				embed1=out[0:cfg.BATCH_SIZE,:]
				embed2=out[cfg.BATCH_SIZE:2*cfg.BATCH_SIZE,:]
				if cfg.USE_CUDA==1:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(embed1,embed2)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y.cuda())
				else:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(embed1,embed2)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y)
#*****************************************************for contrastive loss******************************************#
			if cfg.METHOD==7:
				embed1=out[0:cfg.BATCH_SIZE,:]
				embed2=out[cfg.BATCH_SIZE:2*cfg.BATCH_SIZE,:]
				if cfg.USE_CUDA==1:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(embed1,embed2,y_.cuda())+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y.cuda())
				else:
					loss=cfg.METRIC_LOSS_PARAM*metric_loss(embed1,embed2,y_)+cfg.CE_LOSS_PARAM*softmax_loss(out,real_y)
#**********************************************************end*************************************************#
			err_pos=old_err_pos
			totalEpochLoss=totalEpochLoss+loss.data
			if not math.isnan(loss.data):
				loss.backward()
				optimizer.step()
			else:
				print(loss.data)
			#res=val.test_training_dataset(out,y_,5,cfg.BATCH_SIZE)
			total_sparsity+=sparsity/cfg.SHOW_PER_ITER
			total_err_pos+=err_pos/cfg.SHOW_PER_ITER
				
			if run_num%cfg.SHOW_PER_ITER==cfg.SHOW_PER_ITER-1:
				preck_test,recallk_test,_,_,_=val.test(net,preprocess.test_loader,cfg.TEST_K,metric)
				#preck_train,recallk_train,_,_,_=val.test(net,preprocess.test_train_loader,cfg.TEST_K,metric)
				print("iter:",run_num,"prec@K:",preck_test,"recall@K:",recallk_test)
				torch.save(net.state_dict(),
				           cfg.MODEL_PATH + str(cfg.MARGIN) + str(cfg.EMBEDDING_WIDTH) +
				           str(cfg.DATASET) + str(cfg.METRIC_LOSS_PARAM) + 'x' + str(cfg.METHOD) +
				           str(cfg.CE_LOSS_PARAM) + 'x' + str(cfg.SOFTMAX_METHOD) +
				           str(cfg.K) + str(cfg.POS_SAMPLE_NUM) + ".pkl")
				'''
				ratio=total_sparsity/total_err_pos
				output = open('m'+str(cfg.MARGIN)+'k'+str(cfg.K)+'n+'+str(cfg.POS_SAMPLE_NUM)+'tau'+str(cfg.TAU)+str(cfg.DATASET)+'.txt', 'a')
				output.write(str(preck_test[0]))
				output.write(' ')
				output.write(str(preck_test[1]))
				output.write(' ')
				output.write(str(preck_test[2]))
				output.write(' ')
				output.write(str(preck_test[3]))
				output.write(' ')
				output.write(str(recallk_test[0]))
				output.write(' ')
				output.write(str(recallk_test[1]))
				output.write(' ')
				output.write(str(recallk_test[2]))
				output.write(' ')
				output.write(str(recallk_test[3]))
				
				output.write(str(preck_train[0]))
				output.write(' ')
				output.write(str(preck_train[1]))
				output.write(' ')
				output.write(str(preck_train[2]))
				output.write(' ')
				output.write(str(preck_train[3]))
				output.write(' ')
				output.write(str(total_sparsity))
				output.write(' ')
				output.write(str(ratio))
				
				output.write('\r')
				output.close()
				total_sparsity=0
				total_err_pos=0
				'''
			run_num+=1
		print("\r\nEpoch:",epoch,"tau:",cfg.TAU,"avgEpochLoss:",totalEpochLoss/iter_num)