Example #1
    def request_argv(args, pre_api=None):
        '''
        Parse command-line arguments into a dict and execute the request.
        Parameter values may contain $a/b/c style path variables.
        :param args: dict of command-line arguments
        :param pre_api: preloaded API data used to resolve $path references
        :return: result of the request
        '''
        param_dic = {}
        if '-p' in args.keys():
            v = args['-p']
            if re.match("\{('\w+'\s*:\s*'?\$?\w+'?)(?:\s*,\s*'\w+':\s*'?\$?\w+)*'?\}", v):
                '''
                If the preloaded API data contains $variable references, substitute them first.
                A referenced variable must be a constant, or it must be resolved before it is used.
                '''
                reg = r'(\$(?:\*|[\w/]+))'
                var_list = re.findall(reg, v)
                # getStudentInfo.do -p {'studentId':$studentId}
                for va in var_list:
                    rand = utils.random_select(utils.find_by_path(pre_api, va[1:]))
                    # str.replace returns a new string, so the result must be assigned back
                    v = v.replace(va, str(rand))
                param_dic['-p'] = v
            # Convert -p {userId:1, loginId:123@qq, ...} into a dict
            if re.match(r"\{('\w+':\s*'?[\w@]+'?)(?:\s*,\s*'\w+':\s*'?[\w@]+'?)*}", v):
                param_dic = eval(v)

        # ready to request
        cfg = Apis()
        api_dic = cfg.find_api(args['name'])
        api_dic['params'].update(param_dic)
        newp = Api(api_dic, '-v' in args)
        return newp.request_and_find(args.get('-j'))
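For illustration, here is one way request_argv might be invoked, mirroring the getStudentInfo.do comment above. The API name, the parameter values, and the layout of pre_api are all made up for this sketch; only the calling convention comes from the code itself.

    # Hypothetical invocation; names and data layout are illustrative only.
    args = {
        'name': 'getStudentInfo.do',             # API to look up via Apis().find_api()
        '-p': "{'studentId': $studentId}",       # $studentId is resolved against pre_api
        '-v': '',                                # the mere presence of '-v' enables verbose mode
    }
    pre_api = {'studentId': [1001, 1002, 1003]}  # random_select picks one of these values
    result = request_argv(args, pre_api=pre_api)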
Example #2
    def __init__(self, api_info, verbose=False):
        self.counter = 0
        self.verbose = verbose
        if isinstance(api_info, str):
            self.api = json.loads(api_info, object_pairs_hook=OrderedDict)
        elif isinstance(api_info, dict):
            self.api = api_info

        # For a dict api object (one api entry whose structure matches params), process the first-level
        # key-value pairs first when a value looks like $xxx/xxx or @getApi ...:
        # the text after $ is treated as a path locating a value inside the JSON structure, which is then
        # substituted in; the text after @ is treated as an API dependency that is requested first, with
        # its result attached at this position in the JSON structure.
        for k, v in api_info.items():

            if isinstance(v, str):
                if '$' in v:
                    # Match $a/b/c style strings
                    reg = r'(\$(?:\*|[\w/]+))'
                    var_list = re.findall(reg, v)
                    for va in var_list:
                        rand = utils.random_select(utils.find_by_path(self.api, va[1:]))
                        v = v.replace(va, str(rand) if rand else '')
                    api_info[k] = v

            # Match @apiName style strings
            if isinstance(v, str) and v.startswith('@'):
                api_info[k] = self.request_cmd(v[1:])
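Both snippets above lean on utils.find_by_path and utils.random_select, which are not shown in these examples. Below is a minimal sketch of the behaviour this code appears to assume: a walk along a '/'-separated path through nested dicts, plus a random pick when the result is a list. It is an illustration only; the project's real helpers are richer (Example #3 calls random_select with two arrays, and Example #4 also feeds it '{a,b,c}'-style strings).

    import random

    def find_by_path(data, path):
        """Walk a '/'-separated path such as 'a/b/c' through nested dicts/lists (sketch)."""
        node = data
        for part in path.split('/'):
            if isinstance(node, list):
                node = [item[part] for item in node if part in item]
            else:
                node = node[part]
        return node

    def random_select(value):
        """Pick one element when given a non-empty list; return other values unchanged (sketch)."""
        if isinstance(value, list):
            return random.choice(value) if value else None
        return value

    # Resolving the '$studentId' reference from Example #1:
    pre_api = {'studentId': [1001, 1002, 1003]}
    print(random_select(find_by_path(pre_api, 'studentId')))  # one of 1001 / 1002 / 1003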
Example #3
    saver = tf.train.Saver(max_to_keep=10, var_list=var_list)

    with multi_gpu.create_session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt_file = tf.train.latest_checkpoint(result_path)
        begin_epoch = 1
        if ckpt_file is not None:
            print('Restoring model from {}...'.format(ckpt_file))
            begin_epoch = int(ckpt_file.split('.')[-4]) + 1
            saver.restore(sess, ckpt_file)
        for epoch in range(begin_epoch, epoches + 1):
            if epoch % anneal_lr_freq == 0:
                learning_rate *= anneal_lr_rate

            x_train_source, t_train_source = utils.random_select(
                x_train.reshape(n_y, -1, n_x), t_train)
            x_train_source = np.tile(x_train_source, (n_y, 1))
            t_train_source = np.tile(t_train_source, (n_y, 1))
            x_train_tmp, t_train_tmp = utils.shuffle(
                np.concatenate((x_train.reshape(
                    -1, n_x, 1), x_train_source.reshape(-1, n_x, 1)),
                               axis=2),
                np.concatenate((t_train.reshape(
                    -1, n_code, 1), t_train_source.reshape(-1, n_code, 1)),
                               axis=2))
            x_train_shuffle = x_train_tmp[:, :, 0].reshape(-1, n_x)
            t_train_shuffle = t_train_tmp[:, :, 0].reshape(-1, n_code)
            x_train_source = x_train_tmp[:, :, 1].reshape(-1, n_x)
            t_train_source = t_train_tmp[:, :, 1].reshape(-1, n_code)
            # x_train = np.reshape(x_train, [-1, n_xl, n_xl, 1])
            lower_bounds = []
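The block above pairs every training sample with a randomly drawn "source" sample, stacks the two along a new trailing axis, shuffles, and then slices the pair apart again, so that a single permutation reorders both arrays identically. A minimal sketch of that trick, assuming utils.shuffle applies one random permutation along axis 0 to both of its arguments (the helper name shuffle_in_unison below is hypothetical):

    import numpy as np

    def shuffle_in_unison(a, b, seed=0):
        """Apply ONE permutation along axis 0 to both arrays so paired rows stay aligned (sketch)."""
        perm = np.random.default_rng(seed).permutation(len(a))
        return a[perm], b[perm]

    x = np.arange(6).reshape(3, 2)             # three "original" samples
    x_source = x + 100                         # their paired "source" samples
    stacked = np.concatenate((x[..., None], x_source[..., None]), axis=2)  # shape (3, 2, 2)
    t = np.zeros((3, 1, 2))                    # dummy labels, stacked the same way
    stacked, t = shuffle_in_unison(stacked, t)
    x_shuffled = stacked[:, :, 0]              # originals, in the new order
    x_source_shuffled = stacked[:, :, 1]       # sources, still aligned row by row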
Example #4
    def params(self):
        '''
        Resolve the special values in self.api['params'], e.g.:
        "@if $type==1: '@getNewAccount.do {'type':3,'classId':$classId}' else"
        '''
        ps = self.api['params']
        for k in list(ps.keys()):  # copy the keys: the $* branch below deletes entries from ps
            v = ps[k]

            if isinstance(v, dict) or isinstance(v, list):
                v = json.dumps(v)

            if not isinstance(v, str):
                continue

            if '%' in v:
                v = v % (self.counter)

            if re.match(r'\{[\u4e00-\u9fa5\w]+(,[\u4e00-\u9fa5\w]+)*\}', v):
                v = utils.random_select(v)

            # 1. Resolve $ variables
            if '$' in v:
                reg = r'(\$(?:\*|[\w/]+))'
                var_list = re.findall(reg, v)
                for va in var_list:

                    if va == '$*':
                        del ps[k]
                        # print(ps)
                        v = v.replace(va, json.dumps({k: v for k, v in ps.items() if v is not None}))
                        ps[k] = v
                    else:
                        rand = utils.random_select(utils.find_by_path(self.api, va[1:]))
                        if va[1:] in ps.keys():
                            ps[va[1:]] = rand if rand else ''
                        v = v.replace(va, str(rand))

            # 2. Executable code blocks: !{{...}} or !<<...>>
            if re.findall(r'!\{\{.+?\}\}', v) or re.findall(r'!<<.+?>>', v):
                v = utils.eval_all(v)

            # Handle commands after @
            if v.startswith('@'):

                while isinstance(v, str) and v.startswith('@'):
                    cmd = v[1:]
                    if cmd.startswith('if '):
                        if_start = cmd.find(':', cmd.index('if')) + 1
                        if_end = len(cmd)
                        else_sentence = None
                        if cmd.find('else') != -1:
                            if_end = cmd.find('else', if_start)
                            else_start = cmd.find('else:')
                            else_sentence = cmd[else_start + len('else:'):].strip()
                        if_cmd = cmd[len('if'):if_start - 1].strip()
                        if_sentence = cmd[if_start:if_end].strip()
                        if eval(if_cmd):
                            v = if_sentence
                        else:
                            v = else_sentence
                    else:
                        v = self.request_cmd(cmd)

            # Syntax that is about to be deprecated
            if isinstance(v, str) and '@' in v:
                at_list = re.findall(r'@<<.+?>>', v)
                for al in at_list:
                    v = v.replace(al, self.request_cmd(al[3:-2]))

            ps[k] = v

        return {k: v for k, v in ps.items() if v is not None}
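The '@if cond: value else: value' branch above is parsed with plain string slicing. The helper below re-implements just that branch as a standalone function so the slicing logic can be tried in isolation; it is an illustration, not part of the original class, and like the original it uses eval() on the condition (assumed to have had its $ variables substituted already):

    def resolve_if(cmd):
        """cmd is the text after the leading '@', e.g. "if 1==1: 'A' else: 'B'" (sketch)."""
        if_start = cmd.find(':', cmd.index('if')) + 1
        if_end = len(cmd)
        else_sentence = None
        if cmd.find('else') != -1:
            if_end = cmd.find('else', if_start)
            else_sentence = cmd[cmd.find('else:') + len('else:'):].strip()
        if_cmd = cmd[len('if'):if_start - 1].strip()
        if_sentence = cmd[if_start:if_end].strip()
        return if_sentence if eval(if_cmd) else else_sentence

    print(resolve_if("if 1==1: 'A' else: 'B'"))  # "'A'"
    print(resolve_if("if 1==2: 'A' else: 'B'"))  # "'B'"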
Example #5
    # Print the name and shape of every model parameter.
    for i in params:
        print(i.name, i.get_shape())

    var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope='decoder') + \
               tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope='encoder')

    saver = tf.train.Saver(max_to_keep=10, var_list=var_list)

    with multi_gpu.create_session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt_file = tf.train.latest_checkpoint(result_path)
        begin_epoch = 1
        if ckpt_file is not None:
            print('Restoring model from {}...'.format(ckpt_file))
            begin_epoch = int(ckpt_file.split('.')[-4]) + 1
            saver.restore(sess, ckpt_file)
        for epoch in range(begin_epoch, epoches + 1):
            if epoch % anneal_lr_freq == 0:
                learning_rate *= anneal_lr_rate

            x_train_source, t_train_source = utils.random_select(x_train.reshape(n_y, -1, n_x), t_train)
            x_train_source = np.tile(x_train_source, (n_y, 1))
            t_train_source = np.tile(t_train_source, (n_y, 1))
            x_train_tmp, t_train_tmp = utils.shuffle(
                np.concatenate((x_train.reshape(-1, n_x, 1), x_train_source.reshape(-1, n_x, 1)), axis=2),
                np.concatenate((t_train.reshape(-1, n_code, 1), t_train_source.reshape(-1, n_code, 1)), axis=2))
            x_train_shuffle = x_train_tmp[:, :, 0].reshape(-1, n_x)
            t_train_shuffle = t_train_tmp[:, :, 0].reshape(-1, n_code)
            x_train_source = x_train_tmp[:, :, 1].reshape(-1, n_x)
            t_train_source = t_train_tmp[:, :, 1].reshape(-1, n_code)
            # x_train = np.reshape(x_train, [-1, n_xl, n_xl, 1])
            lower_bounds = []
            tv_losses = []
            time_train = -time.time()
            for t in range(train_iters):
                iter = t + 1
                x_batch = x_train_shuffle[t * train_batch_size:(t + 1) * train_batch_size]
                x_batch_bin = sess.run(x_bin, feed_dict={x_orig: x_batch})
                t_batch = t_train_shuffle[t * train_batch_size:(t + 1) * train_batch_size]
                x_batch_source = x_train_source[t * train_batch_size:(t + 1) * train_batch_size]
                x_batch_source_bin = sess.run(x_bin, feed_dict={x_orig: x_batch_source})
                t_batch_source = t_train_source[t * train_batch_size:(t + 1) * train_batch_size]

                _, lb, tv = sess.run([infer, lower_bound, tv_loss],
                                     feed_dict={x_orig: x_batch, x: x_batch_bin, code: t_batch,
                                                x_source: x_batch_source_bin, code_source: t_batch_source,
                                                learning_rate_ph: learning_rate, is_training: True})
                lower_bounds.append(lb)
                tv_losses.append(tv)

                if iter % print_freq == 0:
                    print('Epoch={} Iter={} ({:.3f}s/iter): '
                          'Lower Bound={}, TV loss={}'.format(
                              epoch, iter,
                              (time.time() + time_train) / print_freq,
                              np.mean(lower_bounds), np.mean(tv_losses)))
                    lower_bounds = []
                    tv_losses = []

                if iter % test_freq == 0:

                    time_test = -time.time()
                    t_batch = t_test[:gen_size]
                    gen_images = sess.run(eval_x_gen,
                                          feed_dict={is_training: False,
                                                     code: t_batch})
                    name = "gen_{}/iwae_hccr.epoch.{}.iter.{}.png".format(n_y, epoch, iter)
                    name = os.path.join(result_path, name)
                    utils.save_image_collections(gen_images, name, shape=(test_ny, display_each_character),
                                                 scale_each=True)

                    # train reconstruction
                    x_batch = x_train_recon[:recon_size].reshape(-1, n_x)
                    x_batch_bin = sess.run(x_bin, feed_dict={x_orig: x_batch})
                    t_batch = t_train_recon[:recon_size]
                    eval_zs, recon_images = \
                        sess.run([eval_z_gen.tensor, eval_x_recon],
                                 feed_dict={x: x_batch_bin, is_training: False, code: t_batch})
                    name = "train_recon_{}/iwae_hccr.epoch.{}.iter.{}.png".format(n_y,
                                                                                epoch, iter)
                    name = os.path.join(
                        result_path, name)
                    utils.save_contrast_image_collections(x_batch.reshape(-1, n_xl, n_xl, n_channels), recon_images,
                                                          name, shape=(test_ny, display_each_character * 2),
                                                          scale_each=True)
                    # train interpolation
                    x_batch_bin = sess.run(x_bin, feed_dict={x_orig: x_train_interp})
                    t_batch = t_train_interp
                    eval_zs, _ = \
                        sess.run([eval_z_gen.tensor, eval_x_recon],
                                 feed_dict={x: x_batch_bin, is_training: False, code: t_batch})
                    epsilon = np.linspace(0, 1, display_each_character)
                    eval_zs_interp = np.array(
                        [eps * eval_zs[0, 2 * i, :] + (1 - eps) * eval_zs[0, 2 * i + 1, :] for i in range(test_ny) for eps
                         in epsilon]).reshape(1, -1, n_z)
                    t_batch = np.tile([t_batch[2 * i, :] for i in range(test_ny)], (1, display_each_character)).reshape(-1,
                                                                                                                    n_code)
                    recon_images = \
                        sess.run(eval_x_interp, feed_dict={interp_z: eval_zs_interp, is_training: False, code: t_batch})
                    name = "interp_{}/iwae_hccr.epoch.{}.iter.{}.png".format(n_y, epoch, iter)
                    name = os.path.join(result_path, name)
                    utils.save_image_collections(recon_images, name, shape=(test_ny, display_each_character),
                                                 scale_each=True)

                    # test reconstruction
                    x_batch = x_test[:recon_size].reshape(-1, n_x)
                    x_batch_bin = sess.run(x_bin, feed_dict={x_orig: x_batch})
                    t_batch = t_test[:recon_size]
                    eval_zs, recon_images = \
                        sess.run([eval_z_gen.tensor, eval_x_recon],
                                 feed_dict={x: x_batch_bin, is_training: False, code: t_batch})
                    name = "test_recon_{}/iwae_hccr.epoch.{}.iter.{}.png".format(n_y,
                                                                                 epoch, iter)
                    name = os.path.join(
                        result_path, name)
                    utils.save_contrast_image_collections(x_batch.reshape(-1, n_xl, n_xl, n_channels), recon_images,
                                                          name, shape=(test_ny, display_each_character * 2),
                                                          scale_each=True)

                    # one-shot generation
                    x_batch = x_oneshot_test.reshape(-1, n_x)  # display_number*nxl*nxl*nchannel
                    x_batch_bin = sess.run(x_bin, feed_dict={x_orig: x_batch})
                    t_batch = t_oneshot_test
                    display_x_oneshot = np.zeros((display_each_character, test_ny + 1, n_xl, n_xl, n_channels))

                    eval_zs_oneshot = sess.run(eval_z_oneshot.tensor,
                                               feed_dict={x: x_batch_bin, is_training: False, code: t_batch})
                    # print (np.shape(eval_zs_oneshot)) #test_ny*nz
                    for i in range(display_each_character):
                        display_x_oneshot[i, 0, :, :, :] = x_batch[i, :].reshape(-1, n_xl, n_xl, n_channels)
                        tmp_z = np.zeros((1, test_ny, n_z))
                        for j in range(test_ny):
                            # print (np.shape(tmp_z) ,np.shape(eval_zs_oneshot))
                            tmp_z[0, j, :] = eval_zs_oneshot[0, i, :]
                        # _, eval_x_oneshot = decoder({'z': oneshot_z}, tf_ny, code, is_training)
                        #print tmp_z.shape , t_oneshot_gen_test.shape
                        tmp_x = sess.run(eval_x_oneshot,
                                         feed_dict={oneshot_z: tmp_z, tf_ny: test_ny, code: t_oneshot_gen_test,
                                                    is_training: False})
                        # print (np.shape(tmp_x))
                        display_x_oneshot[i, 1:, :, :, :] = tmp_x
                    display_x_oneshot = np.reshape(display_x_oneshot, (-1, n_xl, n_xl, n_channels))

                    # TODO: binarize the one-shot samples for display
                    display_x_oneshot = (display_x_oneshot > 0.5).astype(np.float32)

                    name = "oneshot_{}/iwae_hccr.epoch.{}.iter.{}.png".format(n_y,
                                                                              epoch, iter)
                    name = os.path.join(
                        result_path, name)

                    utils.save_image_collections(display_x_oneshot,
                                                 name, shape=(display_each_character, test_ny + 1),
                                                 scale_each=True)

                    # disentangle
                    t_batch = t_test[:recon_size]
                    z_each = np.random.normal(size=(display_each_character, n_z))
                    # print (z_each.shape)
                    z_batch = np.zeros((test_ny, display_each_character, n_z))
                    # print (z_batch.shape)
                    for i in range(test_ny):
                        z_batch[i, :, :] = z_each
                    z_batch = np.reshape(z_batch, (-1, n_z))
                    eval_disentange_x = \
                        sess.run(disentangle_x,
                                 feed_dict={disentange_z: z_batch, is_training: False, code: t_batch})
                    name = "disentangle_{}/iwae_hccr.epoch.{}.iter.{}.png".format(n_y,
                                                                                  epoch, iter)
                    name = os.path.join(
                        result_path, name)
                    utils.save_image_collections(eval_disentange_x,
                                                 name, shape=(test_ny, display_each_character),
                                                 scale_each=True)

                    time_test += time.time()

                if iter % save_freq == 0:
                    save_path = "iwae.epoch.{}.iter.{}.ckpt".format(epoch, iter)
                    save_path = os.path.join(result_path, save_path)
                    saver.save(sess, save_path)

                if iter % print_freq == 0:
                    time_train = -time.time()
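A note on how training resumes in Examples #3 and #5: checkpoints are saved as "iwae.epoch.{epoch}.iter.{iter}.ckpt" (see saver.save above), so splitting the restored path on '.' puts the epoch number at index -4, which is what begin_epoch = int(ckpt_file.split('.')[-4]) + 1 relies on. A small worked example with an illustrative path:

    ckpt_file = "/result/iwae.epoch.3.iter.500.ckpt"   # illustrative path only
    parts = ckpt_file.split('.')   # ['/result/iwae', 'epoch', '3', 'iter', '500', 'ckpt']
    begin_epoch = int(parts[-4]) + 1
    print(begin_epoch)             # 4 -> training resumes from epoch 4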