def get_reynolds_stress_errors(data,
                               avg_vel_by_loc,
                               avg_vel_by_loc_w,
                               lower,
                               start=0,
                               end=0.18,
                               step=0.01):

    if type(avg_vel_by_loc) is str:
        avg_vel_by_loc = tls.read_json(avg_vel_by_loc)

    if type(avg_vel_by_loc_w) is str:
        avg_vel_by_loc_w = tls.read_json(avg_vel_by_loc_w)

    index = -1
    if lower:
        index = 0

    def param_f(elem, i):
        position = str(tls.group_by_location(elem, i)).replace("-0.0", "0.0")
        return (-elem.velocity()[i][0] - sorted(avg_vel_by_loc[position], key=lambda x: x[0])[index][0]) * \
                (-sorted(avg_vel_by_loc_w[position], key=lambda x: x[0])[index][0] + elem.velocity()[i][1])

    return tls.group_parameter(
        data, lambda t, i: tls.group_by_height(t, i, start, end, step),
        param_f)
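Every snippet on this page relies on a read_json helper (imported as tls.read_json, tools.read_json, or plain read_json) that is not shown here. As a rough sketch under that assumption, the helper is little more than a json.load wrapper; the real projects may add file extensions, default directories, or encoding handling:

import json


def read_json(filename):
    # Hypothetical minimal helper: parse a JSON file and return the result
    # (usually a dict in the examples below).
    with open(filename) as f:
        return json.load(f)


def save_as_json(data, filename):
    # Hypothetical counterpart used by some snippets (e.g. tls.save_as_json);
    # the real helper may append a ".json" extension to the filename.
    with open(filename, "w") as f:
        json.dump(data, f)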
Example #2
def get_acc(vel):
    sl = ft.Scene("/home/ron/Desktop/Alexey/the_dataset/traj_" + vel +
                  "_low.h5")
    sh = ft.Scene("/home/ron/Desktop/Alexey/the_dataset/traj_" + vel +
                  "_high.h5")

    acc_low = group_avarage_acc(sl, group_by_x_n_z)
    acc_to_save = {}
    for key in acc_low.keys():
        acc_to_save[key] = [acc_low[key][0].tolist(), acc_low[key][1]]
    tls.save_as_json(acc_to_save, "accel_by_x_and_z_" + vel + "_lower")
    print("Lower done")

    acc_high = group_avarage_acc(sh, group_by_x_n_z)
    acc_to_save = {}
    for key in acc_high.keys():
        acc_to_save[key] = [acc_high[key][0].tolist(), acc_high[key][1]]
    tls.save_as_json(acc_to_save, "accel_by_x_and_z_" + vel + "_higher")
    print("Higher done")

    m = tls.merge_dict(
        tls.read_json("accel_by_x_and_z_" + vel + "_higher"),
        tls.read_json("accel_by_x_and_z_" + vel + "_lower"),
        lambda a, b: [((np.array(a[0]) * a[1] + np.array(b[0]) * b[1]) /
                       (a[1] + b[1])).tolist(), a[1] + b[1]])
    tls.save_as_json(m, "accel_by_x_and_z_" + vel)
    print("DONE")
Example #3
def get_dispersive_stress(data,
                          avg_vel_by_loc,
                          avg_vel_by_h,
                          start=0,
                          end=0.18,
                          step=0.01):

    if type(avg_vel_by_loc) is str:
        avg_vel_by_loc = tls.read_json(avg_vel_by_loc)
    if type(avg_vel_by_h) is str:
        avg_vel_by_h = tls.read_json(avg_vel_by_h)

    def param_f(elem, i):
        position = tls.group_by_location(elem, i)
        pos_str = str(position).replace("-0.0", "0.0")
        height = str(abs(position[1])) + " - " + str(abs(position[1]) + step)
        if height not in avg_vel_by_h.keys():
            height = str(abs(position[1]) - step) + " - " + str(
                abs(position[1]))
        return (-avg_vel_by_loc[pos_str][0][0] + avg_vel_by_h[height][0][0]) * \
                (avg_vel_by_loc[pos_str][0][1] - avg_vel_by_h[height][0][1])

    return tls.group_parameter(
        data, lambda t, i: tls.group_by_height(t, i, start, end, step),
        param_f)
Example #4
def sum_all_acc(vel,
                area=0.05 * 0.01,
                mult=mass,
                only_corner=True,
                version="2"):
    acc = tls.read_json("accel" + version + "_by_x_and_z_" + vel)
    acc_err = tls.read_json("Statistics/acc_nb_xz_mult_avgs_" + vel)
    total = {}
    count = {}
    num = {}
    err = {}
    Cd = {}
    Cd_g = {}
    for key in acc.keys():

        if only_corner and -float(key.split(", ")[0].replace('(', "")) <= 0.12:
            continue

        if key not in acc_err.keys():
            print(key + " is missing")
            continue

        h = float(key.split(", ")[1].replace(")", ""))
        if h in total.keys():
            total[h] += np.array(acc[key][0]) * mult
            err[h][0] += sorted(acc_err[key], key=lambda a: a[0])[0][0] * mult
            err[h][1] += sorted(acc_err[key], key=lambda a: a[0])[-1][0] * mult
            count[h] += acc[key][1]
            num[h] += 1.0
        else:
            total[h] = np.array(acc[key][0]) * mult
            count[h] = acc[key][1]
            err[h] = [0, 0]
            err[h][0] = sorted(acc_err[key], key=lambda a: a[0])[0][0] * mult
            err[h][1] = sorted(acc_err[key], key=lambda a: a[0])[-1][0] * mult
            num[h] = 1.0
    tmp = raupach.get_drag_raupach(vel)["v"]
    for key in total.keys():
        total[key] = [total[key] / num[key], count[key]]
        Cd_g[key] = (-total[key][0][0] * 2) / (air_density * (float(vel)**2) *
                                               area), count[key]
        err[key][0] = abs((((err[key][0] / num[key]) * 2) /
                           (air_density * (float(vel)**2) * area)) -
                          Cd_g[key][0])
        err[key][1] = abs((((err[key][1] / num[key]) * 2) /
                           (air_density * (float(vel)**2) * area)) -
                          Cd_g[key][0])
        try:
            Cd[key] = (-total[key][0][0] * 2) / (
                air_density *
                (tmp[round(key * 100.0) - 0.5]**2) * area), count[key]
        except KeyError:
            pass
    return total, Cd, Cd_g, err
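sum_all_acc converts the averaged force (acceleration times mult, which defaults to mass) into a drag coefficient by inverting the quadratic drag law; Cd_g uses the nominal free-stream speed float(vel), while Cd uses the height-dependent velocity profile from raupach.get_drag_raupach. A small sketch of the rearrangement, for reference (rho here is an illustrative stand-in for the module's air_density):

def drag_coefficient(force_x, velocity, area=0.05 * 0.01, rho=1.2):
    # Rearranged quadratic drag law, as used for Cd_g and Cd above:
    #   F  = 0.5 * rho * U**2 * area * Cd
    #   Cd = 2 * F / (rho * U**2 * area)
    return 2.0 * force_x / (rho * velocity ** 2 * area)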
Example #5
def quiver_velocity(vel):
    acc = tls.read_json("raupach_data/avg_vel_by_loc_" + vel)
    fig, ax = pplot.subplots()
    matx = np.zeros((18, 18))
    matcount = np.zeros((18, 18))
    matz = np.zeros((18, 18))

    for key in acc.keys():
        x = int(float(key.split(", ")[0].replace("(", "")) * 100.0)
        z = int(float(key.split(", ")[1]) * 100.0)
        if x > 0:
            x *= -1
        matx[z, -x] += -acc[key][0][0] * acc[key][1]
        matz[z, -x] += acc[key][0][1] * acc[key][1]
        matcount[z, -x] += acc[key][1]

    Y = np.zeros((18, 18))
    X = np.zeros((18, 18))
    for i in range(len(Y)):
        for j in range(len(Y)):
            Y[i, j] = i / 10.0
            X[i, j] = j / 10.0
            if matcount[i, j] != 0:
                matx[i, j] /= matcount[i, j]
                matz[i, j] /= matcount[i, j]

    ax.quiver(X, Y, matx, matz, units="inches", scale=1)

    ax.set_xlabel(r"x/H")
    ax.set_ylabel(r"z/H")

    return fig, ax
Example #6
def draw_quiver(vel):
    acc = tls.read_json("accel_by_x_and_z_" + vel)
    fig, ax = pplot.subplots()
    matx = np.zeros((18, 18))
    matz = np.zeros((18, 18))

    for key in acc.keys():
        if acc[key][1] < 10000:
            continue
        x = int(float(key.split(", ")[0].replace("(", "")) * 100)
        if x > 0:
            continue
        z = int(float(key.split(", ")[1].replace(")", "")) * 100)
        matx[z,
             -x] = math.sqrt(abs(-acc[key][0][0])) * np.sign(-acc[key][0][0])
        matz[z, -x] = math.sqrt(abs(acc[key][0][1])) * np.sign(acc[key][0][1])

    Y = np.zeros((18, 18))
    X = np.zeros((18, 18))
    for i in range(len(Y)):
        for j in range(len(Y)):
            Y[i, j] = i / 10.0
            X[i, j] = j / 10.0

    ax.quiver(X, Y, matx, matz, units="inches", scale=8)

    ax.set_xlabel(r"x/H")
    ax.set_ylabel(r"z/H")

    return fig, ax
Example #7
def read_problem(in_path):
    raw = tools.read_json(in_path)
    start = convex.ConvexPolygon(raw["start"])
    end = convex.ConvexPolygon(raw["end"])
    obstacles = [convex.ConvexPolygon(ver_i) for ver_i in raw["collision"]]
    pol_envir = collision.PolygonEnvir(obstacles)
    return Problem(start, end, pol_envir)
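Judging from the keys read above, the problem file holds a start polygon, an end polygon, and a list of obstacle polygons under "collision". A sketch of such a file's contents; only the three keys are certain, the vertex-list format is an assumption about what convex.ConvexPolygon accepts:

# Hypothetical problem-file contents (vertex lists are an assumed format).
example_problem = {
    "start": [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
    "end": [[8.0, 8.0], [9.0, 8.0], [9.0, 9.0], [8.0, 9.0]],
    "collision": [
        [[3.0, 3.0], [5.0, 3.0], [5.0, 5.0], [3.0, 5.0]],
        [[6.0, 1.0], [7.0, 1.0], [7.0, 2.0], [6.0, 2.0]],
    ],
}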
Example #8
def calc_vel_and_drag_from_data_Cd(data, area=0.0005, acc=minimum_acc):
    ret = {}

    if type(data) is str:
        data = tls.read_json(data)

    x_vel = []
    for key in sorted(data.keys()):
        if data[key][1] < acc or key == "no group":
            continue

        key_splat = key.split(' - ')
        height = (float(key_splat[0]) + float(key_splat[1])) / 2
        x_vel.append([-data[key][0][0], height])

    ret["x_velocities"] = x_vel

    drag_list = []
    for elem in x_vel:
        drag_list.append([estimate_drag_Cd(elem[0], area), elem[1]])

    ret["drag_list"] = drag_list

    ret["drag"] = reduce(lambda a, b: a + b[0], drag_list, 0)

    return ret
Example #9
class TestApprove(unittest.TestCase):
    # Set up
    def setUp(self) -> None:
        # Get a session object
        self.session = requests.session()
        # Get an ApiCommon object
        self.common = ApiCommon(self.session)
        # Get an ApiApprove object
        self.approve = self.common.get_api_approve()

    # Tear down
    def tearDown(self) -> None:
        # Close the session
        self.session.close()

    # 1. Real-name verification API test method
    @parameterized.expand(read_json("approve.json", "approve"))
    def test01_approve(self, realname, card_id, response_code, status_code,
                       expect_msg):
        # Log in
        self.common.get_api_reg_login().api_login("13600001111", "test12345")
        # Call the real-name verification API
        r = self.approve.api_approve(realname, card_id)
        log.info("Verification result: {}".format(r.json()))
        print("Verification result:", r.json())
        # Assert
        common_assert(self, r, response_code, status_code, expect_msg)

    # 2. Verification query API test method
    @parameterized.expand(read_json("approve.json", "get_approve"))
    def test02_get_approve(self, login_name, response_code, phone_4):
        # Log in
        self.common.get_api_reg_login().api_login(login_name, "test12345")
        # Call the verification query API
        r = self.approve.api_get_approve()
        log.info("Verification query result: {}".format(r.json()))
        # Assert
        common_assert(self, r, response_code=response_code, status_code=None)
        self.assertEqual(response_code, r.status_code)
        if phone_4:
            # Additionally assert the last four digits of the phone number
            phone = r.json().get("phone")
            print("phone:", phone)
            self.assertIn(phone_4, phone)
Example #10
def get_error(file_name, h, mult=1.0):
    data = tls.read_json(file_name)
    for key in data.keys():
        splat = key.split(" - ")
        firstNum = float(splat[0])
        secondNum = float(splat[1])
        if h > firstNum and h < secondNum:
            rel_array = data[key]
            rel_array.sort(key=lambda a: a[0])
            return np.array([rel_array[0][0], rel_array[-1][0]]) * mult
Example #11
def get_average_error(value_group, error_group, limit=10000):

    value_data = read_json(value_group)
    error_data = read_json(error_group)

    avg = 0.0
    count = 0.0

    for key in value_data.keys():
        if key in error_data.keys():
            value = value_data[key]
            if value[1] > limit:
                acc = -value[0][0]
                err_orderd = list(map(lambda a: a[0], error_data[key]))
                err_orderd.sort()
                avg += (abs(err_orderd[0] - acc) +
                        abs(err_orderd[-1] - acc)) / 2.0
                count += 1

    return avg / count
Example #12
def get_error_cd_modified(file_name, h):
    data = tls.read_json(file_name)
    for key in data.keys():
        splat = key.split(" - ")
        firstNum = float(splat[0])
        secondNum = float(splat[1])
        if h > firstNum and h < secondNum:
            rel_array = data[key]
            rel_array.sort(key=lambda a: a[0])
            return np.array([(estimate_drag_Cd(rel_array[0][0], 0.0005)),
                             (estimate_drag_Cd(rel_array[-1][0], 0.0005))])
Example #13
def get_reynolds_stress(data, avg_vel_by_loc, start=0, end=0.18, step=0.01):

    if type(avg_vel_by_loc) is str:
        avg_vel_by_loc = tls.read_json(avg_vel_by_loc)

    def param_f(elem, i):
        position = str(tls.group_by_location(elem, i)).replace("-0.0", "0.0")
        return (avg_vel_by_loc[position][0][0] - elem.velocity()[i][0]) * \
                (-avg_vel_by_loc[position][0][1] + elem.velocity()[i][1])

    return tls.group_parameter(
        data, lambda t, i: tls.group_by_height(t, i, start, end, step),
        param_f)
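get_reynolds_stress pairs each sample with the location-averaged velocity and returns, per sample, the product of the velocity fluctuations with a sign flip, grouped by height via tls.group_parameter. In the usual notation (a reading of the code, not shown in the source project's docs):

# Per-sample parameter computed above, where u and w are the instantaneous
# velocity components and <u>, <w> the location averages:
#   (<u> - u) * (w - <w>)  =  -(u - <u>) * (w - <w>)  =  -u'w'
# i.e. the kinematic Reynolds shear stress contribution for that sample.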
Example #14
    def restore(self, filename, model_and_loss, include_params="*", exclude_params=()):
        # -----------------------------------------------------------------------------------------
        # Make sure file exists
        # -----------------------------------------------------------------------------------------
        if not os.path.isfile(filename):
            logging.info("Could not find checkpoint file '%s'!" % filename)
            quit()

        # check which file version is latest
        no_extension = filename.split('.')[0]
        statistics_filename = no_extension + ".json"
        statistics = tools.read_json(statistics_filename)
        logging.info('JSON: ' + str(statistics))
        shadow_is_latest = statistics['shadow']
        logging.info('Shadow is latest: ' + str(shadow_is_latest))

        # -----------------------------------------------------------------------------------------
        # Load checkpoint from file including the state_dict
        # -----------------------------------------------------------------------------------------
        if shadow_is_latest:
            checkpoint_with_state = torch.load(no_extension + '_shadow' + '.ckpt')
        else:
            checkpoint_with_state = torch.load(filename)

        # -----------------------------------------------------------------------------------------
        # Load filtered state dictionary
        # -----------------------------------------------------------------------------------------
        state_dict = checkpoint_with_state[self._model_key]
        restore_keys = tools.filter_list_of_strings(
            state_dict.keys(),
            include=include_params,
            exclude=exclude_params)
        state_dict = {key: value for key, value in state_dict.items() if key in restore_keys}
        self._load_state_dict_into_module(state_dict, model_and_loss)
        # logging.info("  Restore keys:")
        # for key in restore_keys:
        #     logging.info("    %s" % key)

        # -----------------------------------------------------------------------------------------
        # Get checkpoint statistics without the state dict
        # -----------------------------------------------------------------------------------------
        checkpoint_stats = {
            key: value for key, value in checkpoint_with_state.items() if key != self._model_key
        }
        logging.info('STATS ' + str(checkpoint_stats))

        return checkpoint_stats, filename
Example #15
    def update(self):
        if not os.path.exists(self.tlsfile):
            print("瓦片集配置文件:" + self.tlsfile + "不存在!")
            return
        self.tlsconf = tools.read_json(self.tlsfile)
        self.tlsdir, _ = os.path.split(self.tlsfile)

        # The various configured paths
        self.source = self.tlsdir + "/" + self.tlsconf["source"]
        self.dest = self.tlsdir + "/" + self.tlsconf["dest"]

        # Iterate over all tileset configurations
        if os.path.isdir(self.source):
            if not os.path.isdir(self.dest):
                os.makedirs(self.dest)
            for tlconf in self.tlsconf["tilesets"]:
                self.gen_tileset(tlconf)
Example #16
def configure_checkpoint_saver(args, model_and_loss):
    with logger.LoggingBlock("Checkpoint", emph=True):
        checkpoint_saver = CheckpointSaver()
        checkpoint_stats = None

        if args.checkpoint is None:
            logging.info("No checkpoint given.")
            logging.info("Starting from scratch with random initialization.")

        elif os.path.isfile(args.checkpoint):
            logging.info("Loading checkpoint file %s" % args.checkpoint)
            checkpoint_stats, filename = checkpoint_saver.restore(
                filename=args.checkpoint,
                model_and_loss=model_and_loss,
                include_params=args.checkpoint_include_params,
                exclude_params=args.checkpoint_exclude_params)

            # load epoch number
            no_extension = filename.split('.')[0]
            statistics_filename = no_extension + ".json"
            statistics = tools.read_json(statistics_filename)
            args.start_epoch = statistics['epoch'] + 1

        elif os.path.isdir(args.checkpoint):
            if args.checkpoint_mode in ["resume_from_best"]:
                logging.info("Loading best checkpoint in %s" % args.checkpoint)
                checkpoint_stats, filename = checkpoint_saver.restore_best(
                    directory=args.checkpoint,
                    model_and_loss=model_and_loss,
                    include_params=args.checkpoint_include_params,
                    exclude_params=args.checkpoint_exclude_params)

            elif args.checkpoint_mode in ["resume_from_latest"]:
                logging.info("Loading latest checkpoint in %s" % args.checkpoint)
                checkpoint_stats, filename = checkpoint_saver.restore_latest(
                    directory=args.checkpoint,
                    model_and_loss=model_and_loss,
                    include_params=args.checkpoint_include_params,
                    exclude_params=args.checkpoint_exclude_params)
            else:
                logging.info("Unknown checkpoint_restore '%s' given!" % args.checkpoint_restore)
                quit()
        else:
            logging.info("Could not find checkpoint file or directory '%s'. Starting with random initialization." % args.checkpoint)

    return checkpoint_saver, checkpoint_stats
Example #17
def filter_video(video_name, json_files, video_path):
    os.makedirs(video_path + '/json_tmp/', exist_ok=True)

    cap = cv2.VideoCapture(video_path + '/' + video_name)
    fps = int(math.ceil(cap.get(5)))  #FPS
    width = int(cap.get(3))  # WIDTH
    height = int(cap.get(4))  # HEIGHT
    font = cv2.FONT_HERSHEY_SIMPLEX  #Creates a font

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out_debug = cv2.VideoWriter(video_path + '/debug.mp4', fourcc, fps,
                                (width, height))

    i = 0
    point_x, point_y = width / 2, height / 2
    idx = 0  # fall back to the first skeleton if get_skeleton fails
    while (cap.isOpened() and i < len(json_files)):
        ret, frame = cap.read()
        if ret:
            kps = read_json(video_path + '/openpose/json/' + json_files[i])
            if len(kps) != 0:
                #get closer skeleton of the previous one
                json_path = video_path + '/json_tmp/' + json_files[i].split(
                    '_')[2] + '.json'

                try:
                    point_x, point_y, idx = get_skeleton(
                        kps, json_path, point_x, point_y)
                except Exception as e:
                    print(e)

                drawed_frame = draw(kps[idx], frame)
                drawed_frame = np.uint8(drawed_frame)
                cv2.putText(drawed_frame, json_files[i].split('/')[-1],
                            (10, 50), font, 1, (0, 0, 255), 2,
                            cv2.LINE_AA)  #Draw the text
                out_debug.write(drawed_frame)

        i = i + 1

    ## RM NON FILTER DIR
    cmd = "rm -rf " + video_path + '/openpose/json/'
    os.system(cmd)

    ## MV FILTERED FILES TO OPENPOSE DIR
    cmd = "mv " + video_path + '/json_tmp ' + video_path + '/openpose/json'
    os.system(cmd)
Example #18
    def save_latest(self, directory, model_and_loss, stats_dict, store_as_best=False):
        # -----------------------------------------------------------------------------------------
        # Make sure directory exists
        # -----------------------------------------------------------------------------------------
        tools.ensure_dir(directory)

        # check previous latest file version
        latest_statistics_filename = os.path.join(
            directory, self._prefix + self._latest_postfix + ".json")
        if os.path.isfile(latest_statistics_filename):
            statistics = tools.read_json(latest_statistics_filename)
            shadow_is_latest = statistics['shadow']
        else:
            shadow_is_latest = True
        stats_dict['shadow'] = not shadow_is_latest

        # -----------------------------------------------------------------------------------------
        # Save
        # -----------------------------------------------------------------------------------------
        save_dict = dict(stats_dict)
        save_dict[self._model_key] = model_and_loss.state_dict()

        if shadow_is_latest:
            latest_checkpoint_filename = os.path.join(
                directory, self._prefix + self._latest_postfix + self._extension)
        else:
            latest_checkpoint_filename = os.path.join(
                directory, self._prefix + self._latest_postfix + '_shadow' + self._extension)

        torch.save(save_dict, latest_checkpoint_filename)
        tools.write_json(data_dict=stats_dict, filename=latest_statistics_filename)

        # -----------------------------------------------------------------------------------------
        # Possibly store as best
        # -----------------------------------------------------------------------------------------
        if store_as_best:
            best_checkpoint_filename = os.path.join(
                directory, self._prefix + self._best_postfix + self._extension)

            best_statistics_filename = os.path.join(
                directory, self._prefix + self._best_postfix + ".json")

            logging.info("Saved checkpoint as best model..")
            shutil.copyfile(latest_checkpoint_filename, best_checkpoint_filename)
            shutil.copyfile(latest_statistics_filename, best_statistics_filename)
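save_latest and restore (Example #14) cooperate through the small JSON sidecar written next to the checkpoint: successive saves alternate between the plain latest file and a "_shadow" twin, and the sidecar's 'shadow' flag records which of the two was written last, so an interrupted save cannot overwrite the previous good checkpoint. A rough walk-through, assuming the prefix and postfix resolve to "checkpoint_latest" (the actual names are class attributes not shown on this page, and the epoch values are illustrative):

# 1st save: no sidecar yet -> shadow_is_latest defaults to True, weights go
#           to checkpoint_latest.ckpt, sidecar becomes {"epoch": 0, "shadow": false}
# 2nd save: sidecar says the plain file is latest -> weights go to
#           checkpoint_latest_shadow.ckpt, sidecar becomes {"epoch": 1, "shadow": true}
# restore:  reads the sidecar and loads the "_shadow" checkpoint whenever
#           "shadow" is true, otherwise the plain latest file.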
Example #19
    def update(self):
        if not os.path.exists(self.headfile):
            print("头像配置文件:" + self.headfile + "不存在!")
            return
        self.headconf = tools.read_json(self.headfile)
        self.mdsdir, _ = os.path.split(self.headfile)
        self.source = self.mdsdir + "/" + self.headconf["source"]
        self.dest = self.mdsdir + "/" + self.headconf["dest"]
        if not os.path.exists(self.dest):
            os.makedirs(self.dest)

        # Iterate over all avatar configurations
        for headc in self.headconf["heads"]:
            if headc["flipX"]:
                srcimg = Image.open(self.source + "/" + headc["image"])
                srcimg = srcimg.transpose(Image.FLIP_LEFT_RIGHT)
                srcimg.save(self.dest + "/" + headc["head"])
            else:
                shutil.copyfile(self.source + "/" + headc["image"],
                                self.dest + "/" + headc["head"])
Example #20
import tensorflow as tf
import logging
import time
import numpy as np
from datetime import timedelta
from tqdm import tqdm
from Model.Transformer import transformer_model, create_attention_mask_from_input_mask
from tools import read_json, save_json
from Optimization import create_optimizer

logging.basicConfig(level=logging.INFO)

events = ['EquityFreeze', 'EquityRepurchase', 'EquityUnderweight', 'EquityOverweight', 'EquityPledge']

cache = read_json('./tfrecord/ner_tag.json')
tag_map = {}

for k in cache.keys():
    tag_map[cache[k]] = k

events_fields = {'EquityFreeze': ['EquityHolder',
                                  'FrozeShares',
                                  'LegalInstitution',
                                  'TotalHoldingShares',
                                  'TotalHoldingRatio',
                                  'StartDate',
                                  'EndDate',
                                  'UnfrozeDate'],
                 'EquityRepurchase': ['CompanyName',
                                      'HighestTradingPrice',
                                      'LowestTradingPrice',
Example #21
def get_std_h(h, vel):
    data = read_json(std_root + str(vel))
    relevant = str(
        next(a for a in data.keys() if abs(h - float(a)) < 0.0001))
    print(relevant)
    return sqrt(data[relevant][0] / (data[relevant][1] - 1))
Example #22
class TestTrust(unittest.TestCase):
    # Set up
    def setUp(self) -> None:
        # Get a session object
        self.session = requests.session()
        # Get an ApiCommon object
        self.common = ApiCommon(self.session)
        # Call the login API
        self.common.get_api_reg_login().api_login("13600001111", "test12345")
        # Get an ApiTrust object
        self.trust = self.common.get_api_trust()

    # Tear down
    def tearDown(self) -> None:
        # Close the session
        self.session.close()

    # Account-opening API test method
    @parameterized.expand(read_json("trust.json", "trust"))
    def test01_trust(self, expect_msg):
        log.info("开户参数化 expect_msc的值为:{}".format(expect_msg))
        # 1、调用开户接口
        r = self.trust.api_trust()
        log.info("开户结果:{}".format(r.json()))
        # 2、提取html数据
        result = html_parser(r)
        log.info("提取开户三方请求数据结果:{}".format(result))
        # 3、请求三方开户
        r = self.session.post(result[0], data=result[1])
        log.info("三方开户请求结果:{}".format(r.text))
        # 4. 断言
        self.assertEqual(expect_msg, r.text)

    # Recharge verification code API test method
    @parameterized.expand(read_json("trust.json", "trust_code"))
    def test02_trust_code(self, random, response_code):
        r = self.trust.api_trust_code(random)
        print("r:", r.text)
        log.info("充值验证码返回响应状态码为:{}".format(r.status_code))
        common_assert(self, r, response_code=response_code, status_code=None)

    # Recharge API test method
    @parameterized.expand(read_json("trust.json", "trust_recharge"))
    def test03_trust_recharge(self, amount, valicode, expect_msg):
        # Request a recharge verification code
        self.trust.api_trust_code(12312313)
        # 1. Call the recharge API
        r = self.trust.api_trust_recharge(amount, valicode)
        log.info("Recharge API response: {}".format(r.json()))
        if valicode != 8888:
            common_assert(self, r, status_code=100, description=expect_msg)
        else:
            # 2. Extract the HTML
            result = html_parser(r)
            log.info("Extracted third-party recharge request data: {}".format(result))
            # 3. Call the third-party recharge
            r = self.session.post(result[0], data=result[1])
            log.info("Third-party recharge result: {}".format(r.text))
            # 4. Assert
            print(r.text)
            self.assertEqual(expect_msg, r.text)
Example #23
    def initialize(self, data):
        self.read(data)
        self.num_trial = 0
        self.response = read_json("static/style.json")
        self.style = read_json("static/style.json")
        print('initialize' + '-' * 30 + f'\n{self}')
Example #24
def list_subcategories():
    """ Return the subcategories from subcategories.json """
    global LIST_SUBCATEGORIES
    LIST_SUBCATEGORIES = tools.read_json('subcategories.json')
    return LIST_SUBCATEGORIES
Example #25
# import tools
# from tools import *
from tools import read_json, generate_string, get_slise
# from lesson10 import generate_string
import os

print(__name__)

filename = "Homeworks/data.json"

result = read_json(filename)

print(result)

res = generate_string(10, 3)
res = get_slise([1, 2, 3, 4, 5], 10, 3)
print(res)

folder_name = "Homeworks"

files = [file for file in os.listdir(folder_name) if ".json" in file]
# files = [file for file in os.listdir(folder_name) if os.path.isfile(os.path.join(folder_name, file))]
print(files)
for file in files:
    res = read_json(os.path.join(folder_name, file))
    print(res)
Example #26
class TestRegLogin(unittest.TestCase):
    # Set up
    def setUp(self) -> None:
        # Get a session object
        self.session = requests.session()
        logger.info("Getting session object: {}".format(self.session))
        # Get an ApiRegLogin object
        self.api = ApiRegLogin(self.session)
        logger.info("Getting ApiRegLogin object: {}".format(self.api))

    # Tear down
    def tearDown(self) -> None:
        logger.info("Closing session object")
        # Close the session object
        self.session.close()

    # 1. Registration image captcha test method
    @parameterized.expand(read_json("register_login.json", "img_code_case"))
    def test01_img_code(self, random, expect_code):
        # Call the image captcha API
        r = self.api.reg_img_code(random)
        # Assert a 200 response
        print("Response status code:", r.status_code)
        try:
            self.assertEqual(expect_code, r.status_code)
        except Exception as e:
            logger.error(e)
            raise

    # 2. Registration SMS verification code
    @parameterized.expand(read_json("register_login.json", "sms_code_case"))
    def test02_sms_code(self, phone, imgverifycode, type, expect_code, expect_status, description):
        # 1. Fetch the image captcha so the session object records the cookie
        self.api.reg_img_code(random.random())
        # 2. Fetch the SMS verification code
        r = self.api.reg_sms_code(phone, imgverifycode, type=type)
        print("SMS verification code result: ", r.json())
        try:
            # Call the shared assertion helper
            common_assert(self, r,status_code=expect_code,status=expect_status, description=description)
        except Exception as e:
            logger.error(e)
            raise

    # 3. Registration test method
    @parameterized.expand(read_json("register_login.json", "reg_case"))
    def test03_reg(self, phone4, password, imgVerifyCode, phone_code, dy_server,
                   invite_phone, expect_code, status, description):
        # 1. Fetch the image captcha
        self.api.reg_img_code(random.random())
        # 2. Fetch the SMS verification code
        self.api.reg_sms_code(phone4, imgVerifyCode)
        # 3. Call the registration API
        r = self.api.reg(phone4,
                         password,
                         imgVerifyCode,
                         phone_code,
                         dy_server,
                         invite_phone)
        print("响应状态码:", r.json())
        logger.info("请求数据:{} 响应结果:{}".format(
            (phone4, password, imgVerifyCode, phone_code, dy_server, invite_phone, expect_code, status,
             description), r.json()))
        try:
            # 4. 断言注册信息
            common_assert(self, r,status_code=expect_code,status=status,description=description)
        except Exception as e:
            logger.error(e)
            raise

    # 4. Login test method
    @parameterized.expand(read_json("register_login.json", "login_case"))
    def test04_login(self,keywords, password, expect_code):
        r = self.api.login(keywords, password)
        logger.info("请求数据:{} 响应数据:{}".format((keywords, password, expect_code), r.json()))

        print("登录结果:", r.json())
        if "error" in password:
            logger.info("锁定60验证...")
            r = self.api.login(keywords, password)
            logger.info("请求数据:{} 响应数据:{}".format((keywords, password, expect_code), r.json()))
            print("登陆结果:", r.json())

            r = self.api.login(keywords, password)
            logger.info("请求数据:{} 响应数据:{}".format((keywords, password, expect_code), r.json()))
            print("登陆结果:", r.json())

            time.sleep(60)
            r = self.api.login("13600001111", "q123456")
            logger.info("请求数据:{} 响应数据:{}".format((keywords, password, expect_code), r.json()))
            print("登陆结果:", r.json())
        try:
            # 4. Assert the login response
            common_assert(self, r, status_code=expect_code)
        except Exception as e:
            logger.error(e)
            raise

    # 5. Is-logged-in test method
    @parameterized.expand(read_json("register_login.json", "islogin_case"))
    def test05_is_login(self, phone4, password, expect_code):
        # 1. Log in
        self.api.login(phone4, password)
        # 2. Check whether the user is logged in
        r = self.api.islogin()
        print("Login query result:", r.json())
        logger.info("Request data: {} Result: {}".format((phone4, password, expect_code), r.json()))
        try:
            common_assert(self, r, status=expect_code)
        except Exception as e:
            logger.error(e)
            raise
Example #27
                      metrics=[CRF.sparse_accuracy])
        model.summary()
        return model

    def train(self):
        history = self.model.fit_generator(self.generator.__iter__(),
                                           steps_per_epoch=len(self.generator),
                                           epochs=10)

        with open('trainHistoryDict.txt', 'wb') as file_pi:
            pickle.dump(history.history, file_pi)

    def predict(self):
        pass


if __name__ == '__main__':
    ROOT_PATH = '/Users/ouhon/PycharmProjects/keras_nlp_tutorial/NER/lstm_crf/'
    path = ROOT_PATH + 'data/raw_data/dataset.jsonl'
    model_path = ROOT_PATH + 'lstm_crf.h5'
    data = read_jsonline(path)
    train_data = data[:int(len(data) * 0.8)]
    val_data = data[int(len(data) * 0.8):]
    token_dict = read_json(ROOT_PATH + 'data/raw_data/token2i.json')
    label_dict = read_json(ROOT_PATH + 'data/raw_data/label2i.json')
    i2label = {str(v): str(i) for i, v in label_dict.items()}
    generator = Data_generator(train_data, token_dict, label_dict)
    v_generator = Data_generator(val_data, token_dict, label_dict)
    lstm_crf = LstmCrf(token_dict, generator, v_generator)
    lstm_crf.train()
Example #28
    def __init__(self,
                 config_file=None,
                 exp_config=None,
                 hh_config=None,
                 prob_config=None):
        """
        Initialise the experiment object.

        :param str config_file:
            Name of the configuration JSON file with the configuration dictionaries: exp_config, hh_config, and
            prob_config. If only the filename is provided, it is assumed that such a file is in the directory
            './exconf/'. Otherwise, the full path must be entered. The default value is None.

        :param dict exp_config:
            Configuration dictionary related to the experiment. Keys and default values are listed as follows:

            'experiment_name':              'test',         # Name of the experiment
            'experiment_type':              'default',      # Type: 'default', 'brute_force', 'basic_metaheuristics'
            'heuristic_collection_file':    'default.txt',  # Heuristic space located in /collections/
            'weights_dataset_file':         None,           # Weights or probability distribution of heuristic space
            'use_parallel':                 True,           # Run the experiment using a pool of processors
            'parallel_pool_size':           None,           # Number of processors available, None = Default
            'auto_collection_num_vals':     5               # Number of values for creating an automatic collection

            **NOTE 1:** 'experiment_type': 'default' or any other name means hyper-heuristic.
            **NOTE 2:** If the collection does not exist and it is not a reserved one ('default.txt', 'automatic.txt',
            'basicmetaheuristics.txt', 'test_collection'), then an automatic heuristic space is generated with
            ``Operators.build_operators`` with 'auto_collection_num_vals' as ``num_vals`` and
            'heuristic_collection_file' as ``filename``.
            **NOTE 3:** 'weights_dataset_file' must be determined in a pre-processing step. For the 'default'
            heuristic space, 'operators_weights.json' is provided.

        :param dict hh_config:
            Configuration dictionary related to the hyper-heuristic procedure. Keys and default values are listed as
            follows:

            'cardinality':                      3,          # Maximum cardinality used for building metaheuristics
            'num_agents':                       30,         # Population size employed by the metaheuristic
            'num_iterations':                   100,        # Maximum number of iterations used by the metaheuristic
            'num_replicas':                     50,         # Number of replicas for each metaheuristic implemented
            'num_steps':                        100,        # * Number of steps that the hyper-heuristic performs
            'max_temperature':                  200,        # * Initial temperature for HH-Simulated Annealing
            'stagnation_percentage':            0.3,        # * Percentage of stagnation used by the hyper-heuristic
            'cooling_rate':                     0.05        # * Cooling rate for HH-Simulated Annealing

            **NOTE 4:** Keys marked with * are only used when ``exp_config['experiment_type']`` is
            neither 'brute_force' nor 'basic_metaheuristics'.

        :param dict prob_config:
            Configuration dictionary related to the problems to solve. Keys and default values are listed as follows:

            'dimensions':       [2, 5, 10, 20, 30, 40, 50], # List of dimensions for the problem domains
            'functions':        bf.__all__,                 # List of function names of the optimisation problems
            'is_constrained':   True                        # True if the problem domain is hard constrained

        :return: None.

        """
        # If there is a configuration file
        if config_file:
            # Adjustments
            directory, filename = path.split(config_file)
            if directory == '':
                directory = './exconf/'  # Default directory
            basename, extension = path.splitext(filename)
            if extension not in ['.json', '']:
                raise ExperimentError("Configuration file must be JSON")

            # Existence verification and load
            full_path = path.join(directory, basename + '.json')
            if path.isfile(full_path):
                all_configs = tl.read_json(full_path)

                # Load data from json file
                exp_config = all_configs['ex_config']
                hh_config = all_configs['hh_config']
                prob_config = all_configs['prob_config']
            else:
                raise ExperimentError(f"File {full_path} does not exist!")
        else:
            if exp_config is None:
                exp_config = dict()
            if hh_config is None:
                hh_config = dict()
            if prob_config is None:
                prob_config = dict()

        # Load the default experiment configuration and compare it with exp_cfg
        self.exp_config = tl.check_fields(
            {
                'experiment_name': 'test',
                'experiment_type':
                'default',  # 'default' -> hh, 'brute_force', 'basic_metaheuristics'
                'heuristic_collection_file': 'default.txt',
                'weights_dataset_file': None,  # 'operators_weights.json',
                'use_parallel': True,
                'parallel_pool_size': None,  # Default
                'auto_collection_num_vals': 5
            },
            exp_config)

        # Load the default hyper-heuristic configuration and compare it with hh_cfg
        self.hh_config = tl.check_fields(
            {
                'cardinality': 3,
                'num_agents': 30,
                'num_iterations': 100,
                'num_replicas': 50,
                'num_steps': 100,
                'max_temperature': 200,
                'stagnation_percentage': 0.3,
                'cooling_rate': 0.05
            }, hh_config)

        # Load the default problem configuration and compare it with prob_config
        self.prob_config = tl.check_fields(
            {
                'dimensions': [2, 5, *range(10, 50 + 1, 10)],
                'functions': bf.__all__,
                'is_constrained': True
            }, prob_config)

        # Check if there is a special case of function name: <choose_randomly>
        self.prob_config['functions'] = [
            bf.__all__[hyp.np.random.randint(0, len(bf.__all__))]
            if fun == '<choose_randomly>' else fun
            for fun in self.prob_config['functions']
        ]

        # Check if the heuristic collection exists
        if not path.isfile('./collections/' +
                           self.exp_config['heuristic_collection_file']):
            # If the name is a reserved one, these files cannot be created automatically
            if exp_config['heuristic_collection_file'] in [
                    'default.txt', 'automatic.txt', 'basicmetaheuristics.txt',
                    'test_collection'
            ]:
                raise ExperimentError(
                    'This collection name is reserved and cannot be created automatically!'
                )
            else:
                Operators.build_operators(
                    Operators.obtain_operators(
                        num_vals=exp_config['auto_collection_num_vals']),
                    file_name=exp_config['heuristic_collection_file'].split(
                        '.')[0])
                self.exp_config['weights_dataset_file'] = None

        # Check whether the weights dataset is required and exists
        if self.exp_config['weights_dataset_file'] and (
                self.exp_config['experiment_type']
                not in ['brute_force', 'basic_metaheuristics']):
            if path.isfile('collections/' +
                           self.exp_config['weights_dataset_file']):
                self.weights_data = tl.read_json(
                    'collections/' + self.exp_config['weights_dataset_file'])
            else:
                raise ExperimentError(
                    'A valid weights_dataset_file must be provided in exp_config'
                )
        else:
            self.weights_data = None
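A usage sketch based on the docstring above. Only __init__ appears on this page, so the class name Experiment and anything beyond construction are assumptions; the dictionary keys and the './exconf/' lookup are the documented behaviour:

# Hypothetical usage; the class name and any run() method are assumptions.
exp = Experiment(
    exp_config={'experiment_name': 'demo',
                'experiment_type': 'default',
                'use_parallel': False},
    hh_config={'cardinality': 3,
               'num_agents': 30,
               'num_iterations': 100},
    prob_config={'dimensions': [2, 5, 10],
                 'is_constrained': True})

# Alternatively, put the three dictionaries in ./exconf/my_setup.json under
# the keys 'ex_config', 'hh_config' and 'prob_config' and load them by name
# (the '.json' extension is appended automatically):
# exp = Experiment(config_file='my_setup')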
Example #29
            )
        ]

        history = self.model.fit_generator(
            self.generator.__iter__(),
            steps_per_epoch=len(generator),
            epochs=3,
            callbacks=callbacks_list,
            validation_data=self.v_generator.__iter__(),
            nb_val_samples=2000)

    def predict(self):
        pass


if __name__ == '__main__':
    path = ROOT_PATH + 'data/toutiao/toutiao_train.csv'
    val_path = ROOT_PATH + 'data/toutiao/toutiao_test.csv'
    train_data = pd.read_csv(path)
    val_data = pd.read_csv(val_path)
    token_dict = read_json(ROOT_PATH + 'data/toutiao/token_dict.json')
    label_dict = read_json(ROOT_PATH + 'data/toutiao/label2i.json')
    train_D = [(x, y)
               for x, y in zip(train_data['x'], train_data['y'])][:10000]
    val_D = [(x, y) for x, y in zip(val_data['x'], val_data['y'])][:5000]
    generator = data_generator(train_D, token_dict, label_dict)
    v_generator = data_generator(val_D, token_dict, label_dict)
    print('build model')
    fast_text_model = FastText(token_dict, generator, v_generator)
    fast_text_model.train()
Example #30
def plot_accl(vel):
    fig, ax = pplot.subplots()
    scale, sax = pplot.subplots()
    acc = tls.read_json("accel_by_x_and_z_" + vel)
    vals_raw = []

    for item in acc.keys():
        if acc[item][1] < 10000:
            continue
        x = int(float(item.split(", ")[0].replace("(", "")) * 100)
        if x > 0:
            continue
        vals_raw.append(-acc[item][0][0])

    maximum = max(vals_raw)
    minimum = min(vals_raw)
    mat = np.zeros(shape=(18, 18, 3)).tolist()
    mat_s = np.zeros(shape=(1, 99, 3)).tolist()

    for key in acc.keys():
        x = int(float(key.split(", ")[0].replace("(", "")) * 100)
        if x > 0:
            continue
        if acc[key][1] < 10000:
            continue
        z = int(float(key.split(", ")[1].replace(")", "")) * 100)
        val = ((-acc[key][0][0] - minimum) / (maximum - minimum))
        mat[z][-x][0] = val
        mat[z][-x][1] = 0
        mat[z][-x][2] = 1.0 - val
        ax.text(-x / 10.0 + 0.05,
                z / 10.0 + 0.05,
                round(-acc[key][0][0], 1),
                ha="center",
                va="center",
                color="w",
                size=7.8)

    for i in range(11):
        if i <= 5:
            mat[i][7] = [1, 1, 1]
        mat[i][15] = [1, 1, 1]

    val = 0.01
    ind = 0
    while val <= 1:
        mat_s[0][ind][0] = val
        mat_s[0][ind][1] = 0
        mat_s[0][ind][2] = 1 - val
        ind += 1
        val += 0.01

    sax.imshow(mat_s, extent=[minimum, maximum, 0, 1], aspect='auto')
    sax.set_xticks(
        [round(a, 2) for a in np.linspace(minimum, maximum, 12)])
    ax.imshow(mat, extent=[0, 1.8, 0, 1.8], origin="lower", aspect='auto')

    ax.set_xlabel(r"$x/H$")
    ax.set_ylabel(r"$z/H$")

    return fig, ax, scale, sax