def neural_style(x, model, content_features, grams, args):
    # Reshape the flat L-BFGS state into an image batch and move it to the GPU
    x = np.asarray(np.reshape(x, args.shape), dtype=np.float32)
    x = Variable(chainer.dataset.concat_examples([x], args.gpu))

    # TV loss
    loss = args.tv_weight * total_variation(x)

    # Extract features for x
    layers = args.content_layers | args.style_layers
    x_features = extract({'data': x}, model, layers)
    x_features = {key: value[0] for key, value in x_features.items()}

    # Content loss
    for layer in args.content_layers:
        loss += args.content_weight * normlize_grad(
            F.MeanSquaredError(), (content_features[layer], x_features[layer]),
            normalize=args.normalize_gradients)

    # Style loss
    for layer in args.style_layers:
        loss += args.style_weight * normlize_grad(
            F.MeanSquaredError(), (grams[layer], gram(x_features[layer])),
            normalize=args.normalize_gradients)

    loss.backward()

    # Transfer loss & gradient from GPU to CPU for the SciPy optimizer
    loss = cuda.to_cpu(loss.data)
    diff = np.asarray(cuda.to_cpu(x.grad).flatten(), dtype=np.float64)
    return loss, diff
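# Hedged sketch (not shown in this file): the gram() and total_variation()
# helpers called above are assumed to follow their standard definitions — a
# channel-wise Gram matrix and a squared-difference TV penalty. The _sketch
# names are illustrative only; slicing a Variable assumes a Chainer version
# where Variable.__getitem__ is available.
def gram_sketch(feature):
    # feature: Variable of shape (batch, channels, height, width)
    b, ch, h, w = feature.shape
    f = F.reshape(feature, (b, ch, h * w))
    # (batch, ch, ch) Gram matrix, normalized by the activation count
    return F.batch_matmul(f, f, transb=True) / np.float32(ch * h * w)

def total_variation_sketch(x):
    # Squared differences between vertically / horizontally adjacent pixels
    dh = x[:, :, 1:, :] - x[:, :, :-1, :]
    dw = x[:, :, :, 1:] - x[:, :, :, :-1]
    return F.sum(dh * dh) + F.sum(dw * dw)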
def __init__(self, *args, **kwargs):
    self.G, self.D = kwargs.pop('models')
    self.args = kwargs.pop('args')

    self.args.content_layers = set(self.args.content_layers)
    self.args.style_layers = set(self.args.style_layers)
    self.layers = self.args.content_layers | self.args.style_layers

    print('Extracting style features from {} ...\n'.format(
        self.args.style_image_path))
    style_image = im_preprocess_vgg(imread(self.args.style_image_path),
                                    load_size=self.args.style_load_size,
                                    dtype=np.float32)
    style_image_var = Variable(chainer.dataset.concat_examples(
        [style_image], self.args.gpu), volatile='on')
    style_features = extract({'data': style_image_var}, self.D,
                             self.args.style_layers)

    # Precompute Gram matrices of the style image, broadcast to batch size
    self.grams = {}
    for key, value in style_features.items():
        gram_feature = gram(value[0])
        _, w, h = gram_feature.shape
        gram_feature = F.broadcast_to(gram_feature,
                                      (self.args.batch_size, w, h))
        gram_feature.volatile = 'off'
        self.grams[key] = gram_feature

    super(StyleUpdater, self).__init__(*args, **kwargs)
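# Hedged usage sketch (assumption, not from this file): StyleUpdater pops its
# extra kwargs and defers to the stock chainer StandardUpdater, so wiring it
# into a trainer presumably follows the usual pattern; `train_iter`,
# `optimizer`, `G`, `D`, and `n_epoch` are hypothetical names.
#
#   updater = StyleUpdater(train_iter, optimizer, models=(G, D), args=args,
#                          device=args.gpu)
#   trainer = chainer.training.Trainer(updater, (n_epoch, 'epoch'))
#   trainer.run()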
def update_core(self):
    batch = self.get_iterator('main').next()
    input_var = Variable(self.converter(batch, self.device))

    # Content features of the input, taken from the fixed descriptor net D
    content_features = extract({'data': input_var}, self.D,
                               self.args.content_layers)
    content_features = {
        key: value[0] for key, value in content_features.items()
    }

    # Forward pass through the generator, then re-extract all needed layers
    output_var = self.G(input_var)
    output_features = extract({'data': output_var}, self.D, self.layers)

    optimizer = self.get_optimizer('main')
    optimizer.update(self.loss, output_features, content_features, output_var)
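# Hedged sketch (assumption): the `self.loss` callback handed to
# optimizer.update above is not shown here; mirroring neural_style(), it is
# presumably shaped roughly like this (the name is illustrative):
def loss_sketch(self, output_features, content_features, output_var):
    loss = self.args.tv_weight * total_variation(output_var)
    for layer in self.args.content_layers:
        loss += self.args.content_weight * F.mean_squared_error(
            content_features[layer], output_features[layer][0])
    for layer in self.args.style_layers:
        loss += self.args.style_weight * F.mean_squared_error(
            self.grams[layer], gram(output_features[layer][0]))
    return loss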
def color_adjust(x, args):
    if args.iter % args.save_interval == 0:
        save_result(x, args)
    args.iter += 1

    # Input for VGG
    x_vgg = np.asarray(np.reshape(x, args.shape), dtype=np.float32)
    x_vgg_var = Variable(chainer.dataset.concat_examples([x_vgg], args.gpu))

    # Poisson loss: match the Laplacian of x to the source Laplacian inside
    # the mask (the border term injects the background boundary condition)
    poisson_loss = F.mean_squared_error(
        (args.content_laplace + args.border_sum) * args.mask_var,
        F.convolution_2d(x_vgg_var * args.mask_var, W=args.W_laplace, pad=1) *
        args.mask_var)
    poisson_loss *= np.prod(x_vgg_var.shape)

    # TV loss
    tv_loss = total_variation(x_vgg_var)

    # Content loss
    content_loss = 0
    x_features = extract({'data': x_vgg_var}, args.vgg, args.content_layers)
    x_features = {key: value[0] for key, value in x_features.items()}
    for layer in args.content_layers:
        content_loss += F.mean_squared_error(args.content_features[layer],
                                             x_features[layer])

    # Realism loss: sum over the first output channel of RealismCNN
    y = args.realism_cnn(x_vgg_var, dropout=False)
    realism_loss = F.sum(y[:, 0, :, :])

    loss = args.poisson_weight * poisson_loss \
        + args.realism_weight * realism_loss \
        + args.tv_weight * tv_loss \
        + args.content_weight * content_loss

    # Backward
    loss.backward()

    # Transfer loss & diff from GPU to CPU
    loss = cuda.to_cpu(loss.data)
    dx = np.squeeze(cuda.to_cpu(x_vgg_var.grad))
    return loss, np.asarray(dx.flatten(), dtype=np.float64)
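# Hedged sketch (assumption): save_result() called above is not shown here;
# it presumably deprocesses the flat optimizer state back into an image and
# writes it under a name derived from args (the name below is illustrative):
def save_result_sketch(x, args):
    img = im_deprocess_vgg(np.reshape(np.copy(x), args.shape),
                           orig_size=args.orig_size, dtype=np.uint8)
    out_name = '{}_{}.png'.format(args.im_name, args.iter)
    imsave(os.path.join(args.result_folder, out_name), img)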
def main():
    parser = argparse.ArgumentParser(
        description='Poisson image editing using RealismCNN')
    parser.add_argument('--poisson_weight', type=float, default=1,
                        help='Weight for poisson loss')
    parser.add_argument('--realism_weight', type=float, default=1e4,
                        help='Weight for realism loss')
    parser.add_argument('--content_weight', type=float, default=1,
                        help='Weight for content loss')
    parser.add_argument('--tv_weight', type=float, default=1e-1,
                        help='Weight for tv loss')
    parser.add_argument('--n_iteration', type=int, default=1000,
                        help='# of iterations')
    parser.add_argument('--save_interval', type=int, default=100,
                        help='Save result every # of iterations')
    parser.add_argument('--rand_init', type=lambda x: x == 'True',
                        default=True, help='Random init input if True')
    parser.add_argument('--content_layers', type=str2list, default='conv4_1',
                        help='Layers for content_loss, separated by ;')
    parser.add_argument('--gpu', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--realism_model_path',
                        default='model/realismCNN_all_iter3.npz',
                        help='Path for pretrained Realism model')
    parser.add_argument('--content_model_path',
                        default='model/VGG_ILSVRC_19_layers.pkl',
                        help='Path for pretrained VGG model')
    parser.add_argument(
        '--data_root',
        default='/data1/wuhuikai/benchmark/Realistic/color_adjustment',
        help='Root folder for color adjustment dataset')
    parser.add_argument('--img_folder', default='pngimages',
                        help='Folder for storing images')
    parser.add_argument('--list_name', default='list.txt',
                        help='Name for file storing image list')
    parser.add_argument('--load_size', type=int, default=224,
                        help='Scale image to load_size')
    parser.add_argument('--result_folder', default='image_editing_result',
                        help='Name for folder storing results')
    parser.add_argument('--result_name', default='loss.txt',
                        help='Name for file saving loss change')
    args = parser.parse_args()

    args.content_layers = set(args.content_layers)

    print('Input arguments:')
    for key, value in vars(args).items():
        print('\t{}: {}'.format(key, value))
    print('')

    # Encode all loss-relevant settings into a result-file prefix
    args.prefix_name = '_'.join(
        sorted([
            '{}({})'.format(key, value) for key, value in vars(args).items()
            if key not in set([
                'realism_model_path', 'content_model_path', 'data_root',
                'img_folder', 'list_name', 'result_folder', 'result_name'
            ])
        ]))

    # Init CNN models
    realism_cnn = RealismCNN()
    print('Load pretrained Realism model from {} ...'.format(
        args.realism_model_path))
    serializers.load_npz(args.realism_model_path, realism_cnn)
    print('Load pretrained VGG model from {} ...\n'.format(
        args.content_model_path))
    with open(args.content_model_path, 'rb') as f:
        vgg = pickle.load(f)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        realism_cnn.to_gpu()  # Copy the model to the GPU
        vgg.to_gpu()

    # Init image list
    im_root = os.path.join(args.data_root, args.img_folder)
    print('Load images from {} according to list {} ...'.format(
        im_root, args.list_name))
    with open(os.path.join(args.data_root, args.list_name)) as f:
        im_list = f.read().strip().split('\n')
    total = len(im_list)
    print('{} images loaded.\n'.format(total))

    # Init result folder
    if not os.path.isdir(args.result_folder):
        os.makedirs(args.result_folder)
    print('Results will be saved to {} ...\n'.format(args.result_folder))

    # Init constant Variables: 4-neighbour Laplacian and neighbour-sum kernels
    W_laplace = Variable(make_kernel(
        3, 3,
        np.asarray([[0, -1, 0], [-1, 4, -1], [0, -1, 0]], dtype=np.float32)),
        volatile='auto')
    W_laplace.to_gpu()
    args.W_laplace = W_laplace
    W_sum = Variable(make_kernel(
        3, 3,
        np.asarray([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.float32)),
        volatile='auto')
    W_sum.to_gpu()

    loss_change = []
    for idx, im_name in enumerate(im_list):
        print('Processing {}/{}, name = {} ...'.format(idx + 1, total,
                                                       im_name))
        obj_vgg = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_obj.png'.format(im_name))),
            args.load_size, dtype=np.float32)
        bg_vgg = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_bg.png'.format(im_name))),
            args.load_size, dtype=np.float32)
        expand_mask = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_softmask.png'.format(im_name))),
            args.load_size, sub_mean=False, dtype=np.uint8,
            preserve_range=False)
        args.orig_size = (args.load_size, args.load_size)
        args.shape = bg_vgg.shape

        # Mask: erode the soft mask so the blending seam lies inside it
        mask = erosion(np.squeeze(expand_mask),
                       np.ones((3, 3), dtype=np.uint8))
        mask = np.asarray(mask[np.newaxis, :, :], dtype=np.float32)
        expand_mask = np.asarray(expand_mask, dtype=np.float32)
        inverse_mask = 1 - mask

        # Vars
        obj_var = Variable(chainer.dataset.concat_examples([obj_vgg],
                                                           args.gpu),
                           volatile='on')
        mask_var = F.broadcast_to(
            Variable(chainer.dataset.concat_examples([mask], args.gpu)),
            obj_var.shape)

        # Laplacian of the source object (the Poisson guidance field)
        content_laplace = F.convolution_2d(obj_var, W=W_laplace, pad=1)
        content_laplace.volatile = 'off'

        # Prefilled border: background pixels just outside the eroded mask
        border = bg_vgg * expand_mask * inverse_mask
        border_var = Variable(chainer.dataset.concat_examples([border],
                                                              args.gpu),
                              volatile='on')
        border_sum = F.convolution_2d(border_var, W=W_sum, pad=1)
        border_sum.volatile = 'off'

        print('\tExtracting content image features ...')
        copy_paste_vgg = obj_vgg * mask + bg_vgg * inverse_mask
        copy_paste_var = Variable(chainer.dataset.concat_examples(
            [copy_paste_vgg], args.gpu), volatile='on')
        content_features = extract({'data': copy_paste_var}, vgg,
                                   args.content_layers)
        content_features = {
            key: value[0] for key, value in content_features.items()
        }
        for _, value in content_features.items():
            value.volatile = 'off'

        # Stash everything color_adjust() needs on args
        args.vgg = vgg
        args.realism_cnn = realism_cnn
        args.border_sum = border_sum
        args.content_laplace = content_laplace
        args.content_features = content_features
        args.mask = mask
        args.mask_var = mask_var
        args.inverse_mask = inverse_mask
        args.bg_vgg = bg_vgg
        args.copy_paste_vgg = copy_paste_vgg
        args.im_name = im_name
        args.iter = 0

        x_init = np.asarray(
            np.random.randn(*args.shape) * 0.001,
            dtype=np.float32) if args.rand_init else np.copy(copy_paste_vgg)
        print('\tOptimize start ...')
        res = minimize(color_adjust, x_init, args=(args,), method='L-BFGS-B',
                       jac=True,
                       options={'maxiter': args.n_iteration, 'disp': False})

        # Cut-and-paste loss as a baseline for comparison
        args.iter = -1
        f0, _ = color_adjust(copy_paste_vgg, args)
        print('\tOptimize done, loss = {} from {}\n'.format(res.fun, f0))
        loss_change.append((im_name, f0, res.fun))

        args.iter = ''
        save_result(res.x, args)

    with open(os.path.join(args.result_folder, args.result_name), 'w') as f:
        for name, f0, fb in loss_change:
            f.write('{} {} {}\n'.format(name, f0, fb))
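# Hedged sketch (assumption): make_kernel(kh, kw, stencil) used above is not
# shown here. To work with F.convolution_2d on a 3-channel image it must
# return a (3, 3, kh, kw) weight, presumably the stencil placed on the
# channel diagonal so each RGB channel is filtered independently:
def make_kernel_sketch(kh, kw, stencil):
    W = np.zeros((3, 3, kh, kw), dtype=np.float32)
    for c in range(3):
        W[c, c] = stencil  # same 2D stencil for every channel
    return W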
                                   collate_fn=cnn1d2d_collate)
    elif args.net_type == 'mlp':
        test_set = MLPDataset(args.test_file, args.feats_dir, args.feats_type)
        test_loader = pyDataLoader(test_set, batch_size=1, shuffle=False,
                                   collate_fn=mlp_collate)

    # Test
    if args.phase == 'test':
        test(device=device, net=net, criterion=criterion,
             model_file=args.model_file, test_loader=test_loader,
             icvec=icvec, save_file=args.save_file)
    # Embedding extractor
    elif args.phase == 'extract':
        extract(device=device, net=net, model_file=args.model_file,
                names_file=args.test_file, loader=test_loader,
                save_file=args.emb_save_file)
    else:
        print('[!] Unknown phase')
        exit(1)
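# Example invocation (hypothetical file names; the flags match the argument
# names referenced above, but the actual CLI of this script is not shown):
#   python main.py --phase test --net_type mlp --test_file lists/test.scp \
#       --feats_dir feats/ --feats_type mfcc --model_file best.pt \
#       --save_file results.txt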
def main():
    parser = argparse.ArgumentParser(
        description='Transfer style from src image to target image')
    parser.add_argument('--gpu', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--content_image', default='images/towernight.jpg',
                        help='Content target image')
    parser.add_argument('--style_images', type=str2list,
                        default='images/Starry_Night.jpg',
                        help='Style src images, separated by ;')
    parser.add_argument(
        '--blend_weights',
        type=lambda x: np.array([float(i) for i in x.split(';')]),
        default=None, help='Weight for each style image, separated by ;')
    parser.add_argument('--content_weight', type=float, default=5,
                        help='Weight for content loss')
    parser.add_argument('--style_weight', type=float, default=100,
                        help='Weight for style loss')
    parser.add_argument('--tv_weight', type=float, default=1e-3,
                        help='Weight for tv loss')
    parser.add_argument('--n_iteration', type=int, default=1000,
                        help='# of iterations')
    parser.add_argument('--normalize_gradients', type=str2bool, default=False,
                        help='Normalize gradients if True')
    parser.add_argument('--rand_init', type=str2bool, default=True,
                        help='Random init input if True')
    parser.add_argument('--content_load_size', type=int, default=512,
                        help='Scale content image to load_size')
    parser.add_argument('--style_load_size', type=int, default=512,
                        help='Scale style image to load_size')
    parser.add_argument('--original_color', type=str2bool, default=False,
                        help='Same color with content image if True')
    parser.add_argument('--style_color', type=str2bool, default=False,
                        help='Same color with style image if True')
    parser.add_argument('--content_layers', type=str2list, default='relu4_2',
                        help='Layers for content_loss, separated by ;')
    parser.add_argument('--style_layers', type=str2list,
                        default='relu1_1;relu2_1;relu3_1;relu4_1;relu5_1',
                        help='Layers for style_loss, separated by ;')
    parser.add_argument('--model_path',
                        default='models/VGG_ILSVRC_19_layers.pkl',
                        help='Path for pretrained model')
    parser.add_argument('--out_folder', default='images/result',
                        help='Folder for storing output result')
    parser.add_argument('--prefix', default='',
                        help='Prefix name for output image')
    args = parser.parse_args()

    print('Load pretrained model from {} ...'.format(args.model_path))
    with open(args.model_path, 'rb') as f:
        model = pickle.load(f)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU

    print('Load content image {} ...'.format(args.content_image))
    content_im_orig = imread(args.content_image)
    args.content_orig_size = content_im_orig.shape[:2] \
        if args.content_load_size else None
    content_im = im_preprocess_vgg(content_im_orig,
                                   load_size=args.content_load_size,
                                   dtype=np.float32)
    args.shape = content_im.shape

    print('Load style image(s) ...\n\t{}'.format('\t'.join(args.style_images)))
    style_images = [
        im_preprocess_vgg(imread(im_path), load_size=args.style_load_size,
                          dtype=np.float32) for im_path in args.style_images
    ]
    if args.blend_weights is None:
        args.blend_weights = np.ones(len(style_images))
    args.blend_weights /= np.sum(args.blend_weights)
    print('Blending weight for each style image: {}'.format(
        args.blend_weights))

    # Init x
    x = np.asarray(np.random.randn(*content_im.shape) * 0.001,
                   dtype=np.float32) if args.rand_init else np.copy(content_im)

    print('Extracting content image features ...')
    args.content_layers = set(args.content_layers)
    content_im = Variable(chainer.dataset.concat_examples([content_im],
                                                          args.gpu),
                          volatile='on')
    content_features = extract({'data': content_im}, model,
                               args.content_layers)
    content_features = {
        key: value[0] for key, value in content_features.items()
    }
    for _, value in content_features.items():
        value.volatile = 'off'

    print('Extracting style image features ...')
    grams = {}
    args.style_layers = set(args.style_layers)
    for i, style_image in enumerate(style_images):
        style_image = Variable(chainer.dataset.concat_examples([style_image],
                                                               args.gpu),
                               volatile='on')
        style_features = extract({'data': style_image}, model,
                                 args.style_layers)
        # Blend Gram matrices of multiple style images by their weights
        for key, value in style_features.items():
            gram_feature = gram(value[0])
            if key in grams:
                grams[key] += args.blend_weights[i] * gram_feature
            else:
                grams[key] = args.blend_weights[i] * gram_feature
    for _, value in grams.items():
        value.volatile = 'off'

    print('Optimize start ...')
    res = minimize(neural_style, x,
                   args=(model, content_features, grams, args),
                   method='L-BFGS-B', jac=True,
                   options={'maxiter': args.n_iteration, 'disp': True})
    loss0, _ = neural_style(x, model, content_features, grams, args)
    print('Optimize done, loss = {}, with loss0 = {}'.format(res.fun, loss0))

    img = im_deprocess_vgg(np.reshape(res.x, args.shape),
                           orig_size=args.content_orig_size, dtype=np.uint8)
    if args.original_color:
        img = original_colors(content_im_orig, img)
    if args.style_color:
        img = style_colors(content_im_orig, img)
    img = np.asarray(img, dtype=np.uint8)

    # Init result folder
    if not os.path.isdir(args.out_folder):
        os.makedirs(args.out_folder)
    print('Results will be saved to {} ...\n'.format(args.out_folder))

    name = '{}_with_style(s)'.format(
        os.path.splitext(os.path.basename(args.content_image))[0])
    for path in args.style_images:
        name = '{}_{}'.format(name,
                              os.path.splitext(os.path.basename(path))[0])
    if args.prefix:
        name = '{}_{}'.format(args.prefix, name)
    imsave(os.path.join(args.out_folder, '{}.png'.format(name)), img)
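# Hedged sketch (assumption): the str2list / str2bool argparse converters
# used above are not shown here; they are presumably thin parsers like:
def str2list_sketch(s):
    # 'relu1_1;relu2_1' -> ['relu1_1', 'relu2_1']
    return [item for item in s.split(';') if item]

def str2bool_sketch(s):
    # argparse hands over the raw string; accept common truthy spellings
    return s.lower() in ('true', '1', 'yes')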
def gethors(what, startime, endtime):
    today = {}
    pro = []    # product orders (order_type == 0) per time bucket
    diao = []   # transfer/loan ("借调") orders (order_type == 1) per bucket
    hours = []  # x-axis labels
    firsttime = datetime.datetime.today()
    lasttime = datetime.datetime.today()

    if what == 'today':
        todays = datetime.datetime.today()
        firsttime = datetime.datetime(todays.year, todays.month, todays.day,
                                      0, 0, 0)
        lasttime = datetime.datetime(todays.year, todays.month, todays.day,
                                     23, 59, 59)
        for i in range(24):
            hours.append('%s%s' % (i, '点'))  # '点' = "o'clock"
            # Count orders in the one-hour window [i:00:00, i:59:59]. The
            # original backwards-running windows were off by one hour and
            # never counted hour 0; iterating the buckets directly fixes both.
            start = firsttime + datetime.timedelta(hours=i)
            end = start + datetime.timedelta(seconds=3599)
            procont = db_session.query(func.count(Order.id)).\
                filter(Order.addtime.between(start, end)).\
                filter(Order.order_type == 0).scalar()
            pro.append(procont)
            diaocont = db_session.query(func.count(Order.id)).\
                filter(Order.addtime.between(start, end)).\
                filter(Order.order_type == 1).scalar()
            diao.append(diaocont)
        today['pro'] = pro
        today['diao'] = diao

    if what == 'week':
        now = datetime.datetime.now()
        firsttime = now - datetime.timedelta(days=now.weekday())  # Monday
        firsttime = datetime.datetime(firsttime.year, firsttime.month,
                                      firsttime.day)
        lasttime = now + datetime.timedelta(days=6 - now.weekday())  # Sunday
        lasttime = datetime.datetime(lasttime.year, lasttime.month,
                                     lasttime.day, 23, 59, 59)
        weekday_names = ['一', '二', '三', '四', '五', '六', '日']  # Mon..Sun
        for i in range(7):
            # Walk day by day from Monday; comparing the full date avoids the
            # month-boundary overflow of the original `firsttime.day + i`.
            day = firsttime + datetime.timedelta(days=i)
            procont = db_session.query(func.count(Order.id)).\
                filter(and_(
                    extract('year', Order.addtime) == day.year,
                    extract('month', Order.addtime) == day.month,
                    extract('day', Order.addtime) == day.day
                )).\
                filter(Order.order_type == 0).scalar()
            pro.append(procont)
            diaocont = db_session.query(func.count(Order.id)).\
                filter(and_(
                    extract('year', Order.addtime) == day.year,
                    extract('month', Order.addtime) == day.month,
                    extract('day', Order.addtime) == day.day
                )).\
                filter(Order.order_type == 1).scalar()
            diao.append(diaocont)
            hours.append('%s%s' % ('星期', weekday_names[i]))  # 星期X = weekday
        today['pro'] = pro
        today['diao'] = diao

    if what == 'month':
        now = datetime.datetime.now()
        firsttime = datetime.datetime(now.year, now.month, 1)
        if now.month == 12:
            lasttime = datetime.datetime(now.year, 12, 31)
        else:
            lasttime = datetime.datetime(now.year, now.month + 1, 1) - \
                datetime.timedelta(days=1)
        lasttime = datetime.datetime(lasttime.year, lasttime.month,
                                     lasttime.day, 23, 59, 59)
        lastday = lasttime.day
        for i in range(1, lastday + 1):
            hours.append('%s%s' % (i, '号'))  # '号' = day-of-month suffix
            procont = db_session.query(func.count(Order.id)).\
                filter(and_(
                    extract('year', Order.addtime) == firsttime.year,
                    extract('month', Order.addtime) == firsttime.month,
                    extract('day', Order.addtime) == i
                )).\
                filter(Order.order_type == 0).scalar()
            pro.append(procont)
            diaocont = db_session.query(func.count(Order.id)).\
                filter(and_(
                    extract('year', Order.addtime) == firsttime.year,
                    extract('month', Order.addtime) == firsttime.month,
                    extract('day', Order.addtime) == i
                )).\
                filter(Order.order_type == 1).scalar()
            diao.append(diaocont)
        today['pro'] = pro
        today['diao'] = diao

    if what == 'year':
        now = datetime.datetime.now()
        firsttime = datetime.datetime(now.year, 1, 1)
        lasttime = datetime.datetime(now.year, 12, 31, 23, 59, 59)
        for i in range(1, 13):
            hours.append('%s%s' % (i, '月'))  # '月' = month suffix
            procont = db_session.query(func.count(Order.id)).\
                filter(and_(
                    extract('year', Order.addtime) == firsttime.year,
                    extract('month', Order.addtime) == i
                )).\
                filter(Order.order_type == 0).scalar()
            pro.append(procont)
            diaocont = db_session.query(func.count(Order.id)).\
                filter(and_(
                    extract('year', Order.addtime) == firsttime.year,
                    extract('month', Order.addtime) == i
                )).\
                filter(Order.order_type == 1).scalar()
            diao.append(diaocont)
        today['pro'] = pro
        today['diao'] = diao

    if what == 'diy':
        firsttime = datetime.datetime.strptime(startime, '%Y-%m-%d')
        lasttime = datetime.datetime.strptime(endtime, '%Y-%m-%d')
        lasttime = datetime.datetime(lasttime.year, lasttime.month,
                                     lasttime.day, 23, 59, 59)
        for d in gen_dates(firsttime, (lasttime - firsttime).days + 1):
            hours.append('%s-%s-%s' % (d.year, d.month, d.day))
            procont = db_session.query(func.count(Order.id)).\
                filter(and_(
                    extract('year', Order.addtime) == d.year,
                    extract('month', Order.addtime) == d.month,
                    extract('day', Order.addtime) == d.day
                )).\
                filter(Order.order_type == 0).scalar()
            pro.append(procont)
            diaocont = db_session.query(func.count(Order.id)).\
                filter(and_(
                    extract('year', Order.addtime) == d.year,
                    extract('month', Order.addtime) == d.month,
                    extract('day', Order.addtime) == d.day
                )).\
                filter(Order.order_type == 1).scalar()
            diao.append(diaocont)
        today['pro'] = pro
        today['diao'] = diao

    # firsttime = 00:00:00 of the selected range, lasttime = 23:59:59.
    # Collect the product-order amounts for that range.
    daif = db_session.query(Order).\
        filter(Order.order_type == 0).\
        filter(Order.state == 0).\
        filter(Order.addtime.between(firsttime, lasttime)).all()
    firstmoney = db_session.query(OrderState).\
        filter(OrderState.orderid == Order.id).\
        filter(Order.order_type == 0).\
        filter(OrderState.state == 1).\
        filter(OrderState.uptime.between(firsttime, lasttime)).all()
    centermoney = db_session.query(OrderState).\
        filter(OrderState.orderid == Order.id).\
        filter(Order.order_type == 0).\
        filter(OrderState.state == 8).\
        filter(OrderState.uptime.between(firsttime, lasttime)).all()
    lastmoney = db_session.query(OrderState).\
        filter(OrderState.orderid == Order.id).\
        filter(Order.order_type == 0).\
        filter(OrderState.state == 13).\
        filter(OrderState.uptime.between(firsttime, lasttime)).all()

    daiflen = sum(int(o.order_total) for o in daif)
    firstmoneylen = sum(int(s.text) for s in firstmoney)
    centermoneylen = sum(int(s.text) for s in centermoney)
    lastmoneylen = sum(int(s.text) for s in lastmoney)

    # Collect the transfer/loan-order (借调) amounts for the range
    diaomoneyed = db_session.query(Order).\
        filter(Order.id == OrderState.orderid).\
        filter(Order.order_type == 1).\
        filter(OrderState.uptime.between(firsttime, lasttime)).all()
    diaomed = sum(int(o.order_total) for o in diaomoneyed)
    # Orders without any OrderState row. The original `Order.id !=
    # OrderState.orderid` produced a cross join that matched almost every
    # order; this rewrites it as a NOT IN subquery (assumed intent:
    # still-pending transfer orders).
    diaomoney = db_session.query(Order).\
        filter(~Order.id.in_(db_session.query(OrderState.orderid))).\
        filter(Order.order_type == 1).\
        filter(Order.addtime.between(firsttime, lasttime)).all()
    diaom = sum(int(o.order_total) for o in diaomoney)

    today['hours'] = hours
    today['money'] = {
        'dai': daiflen,
        'dailen': len(daif),
        'first': firstmoneylen,
        'firstlen': len(firstmoney),
        'center': centermoneylen,
        'centerlen': len(centermoney),
        'last': lastmoneylen,
        'lastlen': len(lastmoney)
    }
    today['diao_money'] = {
        'daidiao': diaom,
        'diaoed': diaomed,
        'daidiaolen': len(diaomoney),
        'diaoedlen': len(diaomoneyed)
    }
    return today
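# Hedged sketch (assumption): gen_dates(start, ndays) used in the 'diy'
# branch above is not shown here; given how it is called, it presumably
# yields one datetime per day starting at `start`:
def gen_dates_sketch(start, ndays):
    for n in range(ndays):
        yield start + datetime.timedelta(days=n)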
def GET(self, name):
    res = model.extract()
    print(str(res[0]))  # debug: log the first extracted record
    return render.login()
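# Example wiring (hypothetical, following the standard web.py pattern for a
# handler class like the one above; 'Extract' is an assumed class name):
#   urls = ('/extract/(.*)', 'Extract')
#   app = web.application(urls, globals())
#   app.run()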