Example #1
def export_outline_tan(folder, body_id, person_id):
    args = {'folder': folder, 'person_id': person_id}
    l_model = LinearModel(**args)
    fbody = l_model.get_body(body_id, 'F')
    sbody = l_model.get_body(body_id, 'S')
    fbody.load_export_features()
    sbody.load_export_features()
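    # status flag: 0 = success; -1 subtracted on outline failure, -2 on feature failure (so -3 = both failed)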
    flag = 0
    try:
        base_path = os.path.join(config.tan_dir, folder)
        if not os.path.exists(base_path):
            os.mkdir(base_path)
        outline_exp = OutlineTan(fbody, sbody)
        file_path = os.path.join(base_path, '%s_FC.txt' % (body_id))
        outline_exp.export_front(file_path)
        file_path = os.path.join(base_path, '%s_SC.txt' % (body_id))
        outline_exp.export_side(file_path)
    except Exception as e:
        logger.error('outline export fail: body_id=%s, person_id=%s' %
                     (body_id, person_id))
        logger.error(e)
        flag -= 1
    try:
        exporter = FeatureTan(fbody, sbody)
        # sbody.features['f_neck_up_L'],_=exporter.map_front2side_feature('f_neck_up_L')
        file_path = os.path.join(base_path, '%s_FL.txt' % (body_id))
        exporter.write_file(file_path)
    except Exception as e:
        logger.error('feature export fail: body_id=%s, person_id=%s' %
                     (body_id, person_id))
        logger.error(e)
        flag -= 2
    return flag
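
The flag returned by export_outline_tan encodes which stage failed: 0 = success, -1 = outline export failed, -2 = feature export failed, -3 = both. A hedged caller sketch; the folder and ID arguments below are placeholders:

result = export_outline_tan('demo', 'b001', 'p001')
if result == 0:
    print('outline and feature files written')
elif result == -1:
    print('outline export failed')
elif result == -2:
    print('feature export failed')
else:  # -3
    print('both exports failed')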
Example #2
File: t_body.py Project: tigerxjtu/measure
def get_front_side_points(name):
    body_id = name
    l_model = LinearModel()
    fbody = l_model.get_body(body_id, 'F')
    sbody = l_model.get_body(body_id, 'S')
    fbody.load_export_features()
    sbody.load_export_features()
    outline_exp = OutlineTan(fbody, sbody)
    return outline_exp.front_points(), outline_exp.side_points()
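
A hedged sketch of consuming the returned point lists, assuming each point is an (x, y) pair as Example #5's indexing suggests:

front, side = get_front_side_points('b001')  # 'b001' is a placeholder body id
front_xs = [p[0] for p in front]  # x coordinates of the front outline
front_ys = [p[1] for p in front]  # y coordinates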
Example #3
File: t_body.py Project: tigerxjtu/measure
def feature_adjust_test(name):
    body_id = name
    l_model = LinearModel()
    fbody = l_model.get_body(body_id, 'F')
    sbody = l_model.get_body(body_id, 'S')
    fbody.load_export_features()
    sbody.load_export_features()
    adjust = FeatureAdjust(fbody, sbody)
    adjust.adjust()
Example #4
File: t_body.py Project: tigerxjtu/measure
def feature_test(name):
    body_id = name
    l_model = LinearModel()
    fbody = l_model.get_body(body_id, 'F')
    sbody = l_model.get_body(body_id, 'S')
    fbody.load_export_features()
    sbody.load_export_features()
    exporter = FeatureTan(fbody, sbody)
    features = exporter.export_features()
Example #5
def outline_tan(folder, body_id, person_id):
    global msg
    args = {'folder': folder, 'person_id': person_id}
    l_model = LinearModel(**args)
    fbody = l_model.get_body(body_id, 'F')
    sbody = l_model.get_body(body_id, 'S')
    bbody = l_model.get_body(body_id, 'B')
    b_outline_body = OutlineBody(bbody)

    f_bd_features = fbody.bdfeatureXY
    s_bd_features = sbody.bdfeatureXY
    b_bd_features = bbody.bdfeatureXY
    # fbody.load_export_features()
    # sbody.load_export_features()
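    # flag: 4 = success, 5 = failure (msg then holds the formatted traceback)
    # front/side/back default to empty lists and are replaced by dicts on success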
    flag = 4
    front = []
    side = []
    back = []
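    # progress marker: 1 = front stage, 2 = side, 3 = back (set but never returned; aids debugging)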
    steps = 1
    try:
        outline_exp = OutlineTan(fbody, sbody)
        front_points = outline_exp.front_points()
        f_w = fbody.img_w
        f_h = fbody.img_h
        f_points = [dict(x=p[0], y=p[1]) for p in front_points]
        front = dict(width=f_w, height=f_h, featureXY=f_points)

        steps = 2
        side_points = outline_exp.side_points()
        s_w = sbody.img_w
        s_h = sbody.img_h
        s_points = [dict(x=p[0], y=p[1]) for p in side_points]
        side = dict(width=s_w, height=s_h, featureXY=s_points)
        steps = 3

        back_points = b_outline_body.outline_points()
        b_w = bbody.img_w
        b_h = bbody.img_h
        b_points = [dict(x=p[0], y=p[1]) for p in back_points]
        back = dict(width=b_w, height=b_h, featureXY=b_points)
    except Exception as e:
        logger.error('outline export fail: body_id=%s, person_id=%s' %
                     (body_id, person_id))
        logger.error(e)
        flag = 5
        msg = traceback.format_exc()
    return flag, front, side, back, f_bd_features, s_bd_features, b_bd_features
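
outline_tan returns a 7-tuple; a hedged consumer sketch with placeholder arguments (msg is only meaningful when flag == 5, and is visible to callers in the same module):

flag, front, side, back, f_feat, s_feat, b_feat = outline_tan('demo', 'b001', 'p001')
if flag == 4:
    # front/side/back are dicts: {'width': ..., 'height': ..., 'featureXY': [{'x': ..., 'y': ...}, ...]}
    print(len(front['featureXY']), 'front outline points')
else:
    print('outline export failed:\n' + msg)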
Example #6
def feature_tan(folder, body_id, person_id):
    global msg
    args = {'folder': folder, 'person_id': person_id}
    l_model = LinearModel(**args)
    fbody = l_model.get_body(body_id, 'F')
    sbody = l_model.get_body(body_id, 'S')
    fbody.load_export_features()
    sbody.load_export_features()
    flag = 4
    features = {}
    try:
        exporter = FeatureTan(fbody, sbody)
        features = exporter.export_features()
    except Exception as e:
        logger.error('feature export fail: body_id=%s, person_id=%s' %
                     (body_id, person_id))
        logger.error(e)
        flag = 5
        msg = traceback.format_exc()
    return flag, features
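
feature_tan follows the same flag protocol (4 = success, 5 = failure); a minimal hedged sketch with placeholder arguments:

flag, features = feature_tan('demo', 'b001', 'p001')
if flag == 4:
    print(sorted(features.keys()))  # names of the exported measurements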
Example #7
			for seq in ind_seq:
				ret.append(self.decode(seq))
			return ret
		else:
			return self.ind2word[int(ind_seq)]

if __name__ == '__main__':

	args = parseArgs()

	# load vocab data
	with open('./data/VocabData.pkl', 'rb') as f:
		VocabData = pickle.load(f)

	# load linear model, transform feature tensor to semantic space
	linNet = LinearModel(hiddenSize=4096)

	sos_id = VocabData['word_dict']['<START>']
	eos_id = VocabData['word_dict']['<END>']

	lstmDec = DecoderRNN(vocab_size=len(VocabData['word_dict']), max_len=15,
						 sos_id=sos_id, eos_id=eos_id,
						 embedding_size=300, hidden_size=4096,
						 embedding_parameter=VocabData['word_embs'],
						 update_embedding=False, use_attention=True)

	# reload saved weights into linNet and lstmDec
	linNet, lstmDec = reloadModel(args.model_path, linNet, lstmDec)

	loader = LoaderDemo()
	# loader = DataLoader(dataset, batch_size=args.batch_imgs, shuffle=False, num_workers=2,
						# collate_fn=dataset.collate_fn)

	symbolDec = SymbolDecoder(VocabData['word_dict'])
Example #8
		default='./save/default/')
	parser.add_argument('-b','--batch_imgs',
		default=4, type=int)
	args = parser.parse_args()
	return args

if __name__ == '__main__':

	args = parseArgs()

	# load vocab data
	with open('./data/VocabData.pkl', 'rb') as f:
		VocabData = pickle.load(f)

	# load linear model, transform feature tensor to semantic space
	linNet = LinearModel(hiddenSize=4096)
	# load LSTM encoder
	lstmEnc = EncoderRNN(len(VocabData['word_dict']), 15, 4096, 300,
	                 input_dropout_p=0, dropout_p=0,
	                 n_layers=1, bidirectional=False, rnn_cell='lstm', variable_lengths=True,
	                 embedding_parameter=VocabData['word_embs'], update_embedding=False)
	# load crit
	crit = SimilarityLoss(0.5,0.5,1)

	if args.evaluate_mode:			# evaluation mode
		loader = LoaderEnc(mode='test')
		linNet,lstmEnc = reloadModel(args.model_path,linNet,lstmEnc)
		eval(loader,linNet,lstmEnc,crit)
	else:							# train mode
		params = list(filter(lambda p: p.requires_grad, lstmEnc.parameters())) + list(linNet.parameters())
		optimizer = torch.optim.Adam(params, lr=0.0001)
		dataset = LoaderEnc()
Example #9
    parser.add_argument('-s', '--save_path', default='./save/default/')
    parser.add_argument('-b', '--batch_imgs', default=4, type=int)
    args = parser.parse_args()
    return args


if __name__ == '__main__':

    args = parseArgs()

    # load vocab data
    with open('./data/VocabData.pkl', 'rb') as f:
        VocabData = pickle.load(f)

    # load linear model, transform feature tensor to semantic space
    linNet = LinearModel(hiddenSize=4096)
    # load LSTM encoder
    lstmEnc = EncoderRNN(len(VocabData['word_dict']),  # vocab_size
                         15,    # max_len
                         4096,  # hidden_size
                         300,   # embedding_size
                         input_dropout_p=0,
                         dropout_p=0,
                         n_layers=1,
                         bidirectional=False,
                         rnn_cell='lstm',
                         variable_lengths=True,
                         embedding_parameter=VocabData['word_embs'],
                         update_embedding=False)
    # load crit
    crit = SimilarityLoss(0.5, 0.5, 1)
Example #10
	parser.add_argument('-c', '--cont_model_path',
						default='./save/default/lstmDec.pt')
	args = parser.parse_args()
	return args


if __name__ == '__main__':

	args = parseArgs()

	# load vocab data
	with open('./data/VocabData.pkl', 'rb') as f:
		VocabData = pickle.load(f)

	# load linear model, transform feature tensor to semantic space
	linNet = LinearModel()

	sos_id = VocabData['word_dict']['<START>']
	eos_id = VocabData['word_dict']['<END>']
	# load LSTM decoder

	lstmDec = DecoderRNN(vocab_size=len(VocabData['word_dict']), max_len=15,
						 sos_id=sos_id, eos_id=eos_id,
						 embedding_size=300, hidden_size=1024,
						 embedding_parameter=VocabData['word_embs'],
						 update_embedding=False, use_attention=True,
						 use_prob_vector=True)
	"""
	lstmEnc = EncoderRNN(len(VocabData['word_dict']), max_len=15, hidden_size=4096, embedding_size=300,
						 input_dropout_p=0, dropout_p=0,
						 n_layers=1, bidirectional=False, rnn_cell='lstm', variable_lengths=False,
						 embedding_parameter=VocabData['word_embs'], update_embedding=False)
	# todo: reload lstmEnc
	#linNet,lstmEnc = reloadModel(args.model_path, linNet, lstmEnc)
	"""