Example #1
class TestLogin:

    # 2. Define the setup method
    def setup_class(self):
        # Get the mp-side browser driver object and assign it to the driver instance attribute
        self.driver = DriverUtils.get_mp_driver()
        # Create the business-layer object
        self.login_proxy = LoginPorxy()

    # 3. Define the teardown method
    def teardown_class(self):
        time.sleep(2)
        # Close the browser
        DriverUtils.quit_mp_driver()

    # 4. Define the test method
    @parameterized.expand(build_data())
    def test_login(self, mobile, code):
        # 5. Test data (now supplied by build_data)
        # mobile = "13911111111"
        # code = "246810"
        # 6. Call the business method
        logging.info("User {} starts logging in".format(mobile))
        self.login_proxy.test_login(mobile, code)
        # 7. Assert the result
        logging.info("Starting assertion of the mp-side login result")
        is_suc = element_is_exist(driver=self.driver, text="江苏传智播客教育科技")
        assert is_suc
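Every example on this page routes its data through a project-specific build_data helper before parameterizing tests or training models. For orientation, a minimal sketch of the JSON-loading variant the UI tests assume (hypothetical; not the projects' actual code):

import json

def build_data(filepath):
    # Hypothetical sketch: read a JSON array of records and return a list of
    # tuples, the shape @parameterized.expand / @pytest.mark.parametrize expect
    with open(filepath, encoding="utf-8") as f:
        records = json.load(f)
    return [tuple(record.values()) for record in records]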
Example #2
File: gan.py Project: asadi8/GAN
def train(STEP_SIZE_DISCRIMINATOR, STEP_SIZE_G_D, BATCH_SIZE, TOTAL_ITERATIONS,
          NOISE_SIZE, D_UPDATES_PER_G_UPDATE, run):

    X_Y_train, X_train = utils.build_data()
    discriminator_network = discriminator.discriminator_class(
        STATE_SHAPE, STEP_SIZE_DISCRIMINATOR)
    generator_network = generator.generator_class(STATE_SHAPE, NOISE_SIZE)
    g_d_network = g_d.g_d_network(generator_network, discriminator_network,
                                  STEP_SIZE_G_D)

    for iteration_number in range(TOTAL_ITERATIONS):
        print("iteration #:", iteration_number)

        #update discriminator "D_UPDATES_PER_G_UPDATE" times
        for count in range(D_UPDATES_PER_G_UPDATE):
            discriminator_loss, fake_X_Y, real_X_Y = discriminator_network.update(
                NOISE_SIZE, BATCH_SIZE, X_train, X_Y_train, generator_network)

        #update generator once
        g_d_loss = g_d_network.update(NOISE_SIZE, BATCH_SIZE, X_train,
                                      discriminator_network)

        #save sampled fake and real images
        utils.save_image(iteration_number, fake_X_Y, "fake",
                         "gan-output-" + str(run))
        utils.save_image(iteration_number, real_X_Y, "real",
                         "gan-output-" + str(run))
Example #3
File: wgan.py Project: asadi8/GAN
def train(STEP_SIZE_CRITIC, STEP_SIZE_G_C, BATCH_SIZE, TOTAL_ITERATIONS,
          NOISE_SIZE, C_UPDATES_PER_G_UPDATE, CLIP_THRESHOLD, run):

    X_Y_train, X_train = utils.build_data()
    critic_network = critic.critic_class(STATE_SHAPE, STEP_SIZE_CRITIC,
                                         CLIP_THRESHOLD)
    generator_network = generator.generator_class(STATE_SHAPE, NOISE_SIZE)
    g_c_network = g_c.g_c_network(generator_network, critic_network,
                                  STEP_SIZE_G_C)

    for iteration_number in range(TOTAL_ITERATIONS):
        print("iteration #:", iteration_number)

        #update critic "C_UPDATES_PER_G_UPDATE" times
        for count in range(C_UPDATES_PER_G_UPDATE):
            critic_loss, fake_X_Y, real_X_Y = critic_network.update(
                NOISE_SIZE, BATCH_SIZE, X_train, X_Y_train, generator_network)

        #update generator once
        g_c_loss = g_c_network.update(NOISE_SIZE, BATCH_SIZE, X_train,
                                      critic_network)

        #save sampled fake and real images
        utils.save_image(iteration_number, fake_X_Y, "fake",
                         "wgan-output-" + str(run))
        utils.save_image(iteration_number, real_X_Y, "real",
                         "wgan-output-" + str(run))
Example #4
def main(run_id, data_path, score_as_pyfunc, score_as_tensorflow_lite):
    print("Options:")
    for k, v in locals().items():
        print(f"  {k}: {v}")

    utils.dump(run_id)
    data, _, _, _ = utils.build_data(data_path)

    model_uri = f"runs:/{run_id}/keras-hd5-model"
    predict_keras(model_uri, data)
    if score_as_pyfunc:
        predict_pyfunc(model_uri, data, "keras-hd5-model")

    model_name = "tensorflow-model"
    if artifact_exists(run_id, model_name):
        predict_tensorflow_model(run_id, data)
    else:
        print(f"No model: {model_name}")

    if score_as_tensorflow_lite:
        model_name = "tensorflow-lite-model"
        if artifact_exists(run_id, model_name):
            predict_tensorflow_lite_model(run_id, data)
        else:
            print(f"No model: {model_name}")

    model_name = "onnx-model"
    if artifact_exists(run_id, model_name):
        model_uri = f"runs:/{run_id}/{model_name}"
        predict_onnx(model_uri, data)
        predict_pyfunc(model_uri, data, "onnx-model")
    else:
        print(f"No model: {model_name}")
Example #5
def build_and_train_model(n=1000, epochs=70, step_size=0.01):
    print('\nGenerating training, validation and test sets of size {} each.'.format(n))
    x_train, y_train = build_data(n)
    x_validation, y_validation = build_data(n)
    x_test, y_test = build_data(n)
    print('Building the model\n')
    model = build_model()
    model.summary()
    print('\nTraining the model on {} epochs'.format(epochs))
    t = time()
    model.fit(x_train, y_train, x_validation, y_validation, epochs=epochs, step_size=step_size)
    t = int(time() - t)
    print('\nTraining time : {0:0.0f} seconds ({1:0.3f} seconds per epoch)'.format(t, t/epochs))
    print('\nTesting the trained model :')
    te_predictions, te_loss = model.predict(x_test, y_test)
    te_accuracy = (te_predictions == y_test).sum() / y_test.shape[1] / te_predictions.shape[0]
    print('Test loss : {}'.format(te_loss.mean()))
    print('Test accuracy : {}'.format(te_accuracy))
Example #6
class TestMisLogin:
    def setup_class(self):
        self.driver = DriverUtils.get_mis_driver()
        self.mis_login_proxy = MisLoginProxy()

    @pytest.mark.parametrize(
        ("mis_username", "mis_password", "text"),
        build_data(
            r"F:\黑马\黑马就业班\UI自动化测试\课堂练习\day11\HM_TT_UI_TEST\data\mis_data\mis_login_data.json"
        ))
    def test_mis_login(self, mis_username, mis_password, text):
        self.mis_login_proxy.mis_login(mis_username, mis_password)
        assert is_element_exist(self.driver, text)
Example #7
def train(epochs, batch_size, autolog, log_as_onnx):
    print("autolog:", autolog)
    x_train, y_train, x_test, y_test = utils.build_data()
    model = build_model()
    print("model:", type(model))

    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    model.fit(x_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size,
              verbose=0)

    test_loss, test_acc = model.evaluate(x_test, y_test)
    print("test_acc:", test_acc)
    print("test_loss:", test_loss)

    if not autolog:
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)

        mlflow.log_metric("test_acc", test_acc)
        mlflow.log_metric("test_loss", test_loss)

        mlflow.keras.log_model(model, "keras-model")
        #mlflow.tensorflow.log_model(model, "tensorflow-model")

        # write model as yaml file
        with open("model.yaml", "w") as f:
            f.write(model.to_yaml())
        mlflow.log_artifact("model.yaml")

        # write model summary
        summary = []
        model.summary(print_fn=summary.append)
        summary = '\n'.join(summary)
        with open("model_summary.txt", "w") as f:
            f.write(summary)
        mlflow.log_artifact("model_summary.txt")

        # MLflow - log onnx model
        if log_as_onnx:
            import onnx_utils
            onnx_utils.log_model(model, "onnx-model")

    predictions = model.predict_classes(x_test)
    print("predictions:", predictions)
Example #8
def PLA_3_times_with_30_data(m, b, num, times):
    # build 2D data
    x, y = build_data(m, b, num)

    #initial weight w = (0, 0, 0)
    w = np.zeros([1, 3])

    # count the iteration total numbers
    iteration_count = 0

    # initial the figure
    plt_fig = plt_proc(x, num, title="HW1-1: PLA with 30 2D data samples")

    # plot the sample line equation
    plt_fig.add_line(w=None,
                     m=m,
                     b=b,
                     num=num,
                     iteration=None,
                     label=f"Benchmark",
                     txt="")

    for i in range(times):

        # run PLA algorithm
        w_result, iteration = PLA(x, y, w, num)

        # verify the line equation
        verification(x, y, w_result, num, iteration, show=True)

        # count the total number of iterations
        iteration_count += iteration

        # plot the line equation
        plt_fig.add_line(w=w_result,
                         m=None,
                         b=None,
                         num=num,
                         iteration=iteration,
                         label=f"{i}",
                         txt=f", iteration = {iteration}")

    print(f"Avg. Iteration = {iteration_count/3:.3f}")

    # save and show the figure
    plt_fig.save_and_show(itr_avg=iteration_count / times,
                          filename='hw1-1.png',
                          avg_show=True)
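PLA(x, y, w, num) above is the classic perceptron learning algorithm. For reference, a minimal sketch of the update rule it presumably implements (hypothetical; assumes x carries a bias column and y holds ±1 labels):

import numpy as np

def PLA(x, y, w, num):
    # Sweep the samples until none are misclassified, nudging w toward
    # each mistake; this halts only if the data are linearly separable
    iteration = 0
    while True:
        errors = 0
        for i in range(num):
            if np.sign(x[i] @ w.T) != y[i]:   # misclassified sample
                w = w + y[i] * x[i]           # perceptron update
                errors += 1
                iteration += 1
        if errors == 0:
            return w, iteration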
Example #9
def train(run, model_name, epochs, batch_size, mlflow_custom_log, log_as_onnx):
    x_train, y_train, x_test, y_test = utils.build_data()
    model = build_model()

    model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    model.summary()
    model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=0)

    test_loss, test_acc = model.evaluate(x_test, y_test)
    print("test_acc:", test_acc)
    print("test_loss:", test_loss)

    if mlflow_custom_log:
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)

        mlflow.log_metric("test_acc", test_acc)
        mlflow.log_metric("test_loss", test_loss)
        mlflow.keras.log_model(model, "keras-model", registered_model_name=model_name)

        # write model summary
        summary = []
        model.summary(print_fn=summary.append)
        summary = '\n'.join(summary)
        with open("model_summary.txt", "w") as f:
            f.write(summary)
        mlflow.log_artifact("model_summary.txt")
    else:
        utils.register_model(run, model_name)

    # write model as yaml file
    with open("model.yaml", "w") as f:
        f.write(model.to_yaml())
    mlflow.log_artifact("model.yaml")

    # MLflow - log onnx model
    if log_as_onnx:
        import onnx_utils
        mname = f"{model_name}_onnx" if model_name else None
        onnx_utils.log_model(model, "onnx-model", mname)

    predictions = model.predict_classes(x_test)
    print("predictions:", predictions)
Example #10
class TestMp:
    def setup_class(self):
        self.driver = DriverUtils.get_mp_driver()
        self.login_proxy = LoginProxy()

    def teardown_class(self):
        DriverUtils.quit_mp_driver()

    # Set the test-case severity level
    @allure.severity(allure.severity_level.BLOCKER)
    @pytest.mark.parametrize(
        ("username", "code", "text"),
        build_data(
            r"F:\黑马\黑马就业班\UI自动化测试\课堂练习\day11\HM_TT_UI_TEST\data\mp_data\mp_login_data.json"
        ))
    def test_login(self, username, code, text):
        self.login_proxy.login(username, code)
        assert is_element_exist(self.driver, text)
Example #11
class TestLogin(unittest.TestCase):
    # Class-level setup fixture
    # a. Open the browser and navigate to the test URL
    @classmethod
    def setUpClass(cls):
        # Create the browser driver object
        cls.driver = DriverUtils.get_driver()
        # Create the home-page business-layer object
        cls.home_proxy = HomeProxy()
        # Create the login business-layer object
        cls.login_proxy = LoginProxy()

    # Class-level teardown fixture
    # e. Close the browser
    @classmethod
    def tearDownClass(cls):
        DriverUtils.quit_driver()

    # Method-level setup fixture
    # Return to the home page before each test method runs
    def setUp(self):
        time.sleep(2)
        self.driver.get("http://localhost/")

    # 3. Define the test method
    @parameterized.expand(build_data("../data/test_login_data.json"))
    def test_login(self, username, password, code, expect, is_suc):
        print("username={}, password={}, code={}, expect={}, is_suc={}".format(
            username, password, code, expect, is_suc))
        # b. Click the login hyperlink on the tpshop home page
        logging.info("-----------------------> Navigating to the login page")
        self.home_proxy.to_login_page()
        # c. Perform the login operation
        logging.info("-----------------------> Performing the login operation")
        self.login_proxy.test_login(username, password, code)
        # Positive cases assert on the page title; negative cases on the popup message
        if is_suc:
            # Positive case: check the page title
            time.sleep(3)
            msg = self.driver.title
            self.assertIn(expect, msg)
        else:
            msg = get_msg()
            self.assertIn(expect, msg)
Example #12
class TestPubAritcal:
    # 2. Define the setup method
    def setup_class(self):
        self.driver = DriverUtils.get_mp_driver()
        # Log in
        self.login = LoginProxy()
        self.login.test_login("13911111111", "246810")

        self.home_proxy = HomeProxy()
        self.pub_ari_proxy = PubAriProxy()

    # 3. Define the test method
    @pytest.mark.parametrize(
        "ar_cont,ch_name",
        build_data(
            "C:\\Users\Administrator\Desktop\测试资料\项目实战\\autoToutiaoTest\data\\test_pub_artical_data.json"
        ))
    def test_pub_artical(self, ar_cont, ch_name):
        # Define the test data
        ari_title = PUB_ARTICAL_TITLE
        ari_content = ar_cont
        option_name = ch_name
        logging.info("发布文章信息为文章标题={},文章内容={},文章频道={}".format(
            ari_title, ar_cont, ch_name))
        # 调用业务层方法
        logging.info("----->调用首页进入发布文章的业务方法")
        self.home_proxy.to_pub_ar_pg()
        logging.info("----->调用发布文章页面发布文章的业务方法")
        self.pub_ari_proxy.test_pub_aritcal(ari_title, ari_content,
                                            option_name)
        # Assert
        assert is_exists_element(self.driver, "新增文章成功")

    # 4. Define the teardown method
    def teardown_class(self):
        time.sleep(2)
        DriverUtils.quit_mp_driver()
Example #13
class TestIHRMLogin(unittest.TestCase):

    # Setup
    def setUp(self):
        self.login_api = TestLoginApi()

    def tearDown(self):
        ...

    filename = app.BASE_DIR + "/data/login_data.json"

    @parameterized.expand(build_data(filename))
    # First case: test a successful login
    def test_login(self, name, jsonData, http_code, success, code, message):
        # The IHRM project allows sending the login request directly
        headers = {"Content-Type": "application/json"}  # define the request headers
        # Send the login request
        response = self.login_api.login(jsonData, headers)
        # Log the login result
        result = response.json()
        logging.info("登录的结果为:{}".format(result))

        # Assert the login result
        assert_common(http_code, success, code, message, response, self)
Example #14
def main(args):
    # fix random seed
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    assert args.source not in args.target, 'Source domain can not be one of the target domains'

    # create train configurations
    config = utils.build_config(args)
    # prepare data
    dsets, dset_loaders = utils.build_data(config)
    # set base network
    net_config = config['encoder']
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.to(DEVICE)
    print(base_network)
    # set GNN classifier
    classifier_gnn = graph_net.ClassifierGNN(in_features=base_network.bottleneck.out_features,
                                             edge_features=config['edge_features'],
                                             nclasses=base_network.fc.out_features,
                                             device=DEVICE)
    classifier_gnn = classifier_gnn.to(DEVICE)
    print(classifier_gnn)

    # train on source domain and compute domain inheritability
    log_str = '==> Step 1: Pre-training on the source dataset ...'
    utils.write_logs(config, log_str)

    base_network, classifier_gnn = trainer.train_source(config, base_network, classifier_gnn, dset_loaders)
    log_str = '==> Finished pre-training on source!\n'
    utils.write_logs(config, log_str)

    log_str = '==> Step 2: Curriculum learning ...'
    utils.write_logs(config, log_str)

    ######## Stage 1: find the closest target domain ##########
    temp_test_loaders = dict(dset_loaders['target_test'])
    max_inherit_domain = trainer.select_closest_domain(config, base_network, classifier_gnn, temp_test_loaders)

    # iterate over all domains
    for _ in range(len(config['data']['target']['name'])):
        log_str = '==> Starting the adaptation on {} ...'.format(max_inherit_domain)
        utils.write_logs(config, log_str)
        ######## Stage 2: adapt to the chosen target domain having the maximum inheritance/similarity ##########
        base_network, classifier_gnn = trainer.adapt_target(config, base_network, classifier_gnn,
                                                            dset_loaders, max_inherit_domain)
        log_str = '==> Finishing the adaptation on {}!\n'.format(max_inherit_domain)
        utils.write_logs(config, log_str)

        ######### Stage 3: obtain the target pseudo labels and upgrade source domain ##########
        trainer.upgrade_source_domain(config, max_inherit_domain, dsets,
                                      dset_loaders, base_network, classifier_gnn)

        ######### Stage 1: recompute target domain inheritability/similarity ###########
        # remove already considered domain
        del temp_test_loaders[max_inherit_domain]
        # find the maximum inheritability/similarity domain
        if len(temp_test_loaders.keys()) > 0:
            max_inherit_domain = trainer.select_closest_domain(config, base_network,
                                                               classifier_gnn, temp_test_loaders)
    ######### Step 3: fine-tuning stage ###########
    log_str = '==> Step 3: Fine-tuning on pseudo-source dataset ...'
    utils.write_logs(config, log_str)
    config['source_iters'] = config['finetune_iters']
    base_network, classifier_gnn = trainer.train_source(config, base_network, classifier_gnn, dset_loaders)
    log_str = 'Finished training and evaluation!'
    utils.write_logs(config, log_str)

    # save models
    if args.save_models:
        torch.save(base_network.cpu().state_dict(), os.path.join(config['output_path'], 'base_network.pth.tar'))
        torch.save(classifier_gnn.cpu().state_dict(), os.path.join(config['output_path'], 'classifier_gnn.pth.tar'))
Example #15
from argparse import ArgumentParser
import mlflow
import mlflow.onnx
import utils
import onnx_utils

print("MLflow Version:", mlflow.__version__)
print("Tracking URI:", mlflow.tracking.get_tracking_uri())

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--model_uri", dest="model_uri", help="model_uri", default="../../data/train/wine-quality-white.csv")
    args = parser.parse_args()
    print("Arguments:")
    for arg in vars(args):
        print(f"  {arg}: {getattr(args, arg)}")

    _, _, data, _ = utils.build_data()
    model = mlflow.onnx.load_model(args.model_uri)
    print("model.type:", type(model))

    predictions = onnx_utils.score_model(model, data)
    print("predictions.type:",type(predictions))
    print("predictions.shape:",predictions.shape)
    print("predictions:",predictions)
Example #16
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--model_uri",
                        dest="model_uri",
                        help="model_uri",
                        required=True)
    parser.add_argument("--data_path",
                        dest="data_path",
                        help="data_path",
                        default="../../data/wine-quality-white.csv")
    args = parser.parse_args()
    print("Arguments:")
    for arg in vars(args):
        print(f"  {arg}: {getattr(args, arg)}")

    X, y = utils.build_data(args.data_path)

    print("\n=== mlflow.catboost.load_model")
    model = mlflow.sklearn.load_model(args.model_uri)
    print("model:", type(model))
    predictions = model.predict(X)
    print("predictions.type:", type(predictions))
    print("predictions.shape:", predictions.shape)
    print("predictions:", predictions)

    print("\n=== mlflow.pyfunc.load_model")
    model = mlflow.pyfunc.load_model(args.model_uri)
    print("model:", type(model))
    predictions = model.predict(X)
    print("predictions.type:", type(predictions))
    print("predictions.shape:", predictions.shape)
Example #17
def main(args):
    # fix random seed
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    # create train configurations
    args.use_cgct_mask = True  # used in CGCT for pseudo label mask in target datasets
    config = utils.build_config(args)
    # prepare data
    dsets, dset_loaders = utils.build_data(config)
    # set base network
    net_config = config['encoder']
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.to(DEVICE)
    print(base_network)
    # set GNN classifier
    classifier_gnn = graph_net.ClassifierGNN(
        in_features=base_network.bottleneck.out_features,
        edge_features=config['edge_features'],
        nclasses=base_network.fc.out_features,
        device=DEVICE)
    classifier_gnn = classifier_gnn.to(DEVICE)
    print(classifier_gnn)

    # train on source domain
    log_str = '==> Step 1: Pre-training on the source dataset ...'
    utils.write_logs(config, log_str)

    base_network, classifier_gnn = trainer.train_source(
        config, base_network, classifier_gnn, dset_loaders)

    log_str = '==> Finished pre-training on source!\n'
    utils.write_logs(config, log_str)

    # create random layer and adversarial network
    class_num = config['encoder']['params']['class_num']
    random_layer = networks.RandomLayer([base_network.output_num(), class_num],
                                        config['random_dim'], DEVICE)
    adv_net = networks.AdversarialNetwork(config['random_dim'],
                                          config['random_dim'],
                                          config['ndomains'])
    random_layer = random_layer.to(DEVICE)
    adv_net = adv_net.to(DEVICE)
    print(random_layer)
    print(adv_net)

    # run adaptation episodes
    log_str = '==> Starting the adaptation'
    utils.write_logs(config, log_str)
    for curri_iter in range(len(config['data']['target']['name'])):
        ######## Step 1: train one adaptation episode on combined target domains ##########
        target_train_datasets = preprocess.ConcatDataset(
            dsets['target_train'].values())
        dset_loaders['target_train'] = DataLoader(
            dataset=target_train_datasets,
            batch_size=config['data']['target']['batch_size'],
            shuffle=True,
            num_workers=config['num_workers'],
            drop_last=True)

        base_network, classifier_gnn = trainer.adapt_target_cgct(
            config, base_network, classifier_gnn, dset_loaders, random_layer,
            adv_net)

        log_str = '==> Finishing {} adaptation episode!\n'.format(curri_iter)
        utils.write_logs(config, log_str)

        ######### Step 2: obtain the target pseudo labels and upgrade target domains ##########
        trainer.upgrade_target_domains(config, dsets, dset_loaders,
                                       base_network, classifier_gnn,
                                       curri_iter)

    ######### Step 3: fine-tuning stage ###########
    log_str = '==> Step 3: Fine-tuning on pseudo-source dataset ...'
    utils.write_logs(config, log_str)

    config['source_iters'] = config['finetune_iters']
    base_network, classifier_gnn = trainer.train_source(
        config, base_network, classifier_gnn, dset_loaders)

    log_str = 'Finished training and evaluation!'
    utils.write_logs(config, log_str)

    # save models
    if args.save_models:
        torch.save(base_network.cpu().state_dict(),
                   os.path.join(config['output_path'], 'base_network.pth.tar'))
        torch.save(
            classifier_gnn.cpu().state_dict(),
            os.path.join(config['output_path'], 'classifier_gnn.pth.tar'))
Example #18
# Import packages
Example #19
# =============================================================================
# split train and test
# =============================================================================
# Train/Test per doc
corpus_train, corpus_test = corpus_split(corpus, split=SPLIT)

# =============================================================================
# Build Tensor Data
# =============================================================================
# Create tensor data from corpus
print('\n---\nBuild Tensor data')
# Train datasets
print('\nBuild Train data:')
train_x, train_y = build_data(corpus_train,
                              char_to_n,
                              max_seq=MAX_SEQ,
                              stride=STRIDE)

if len(corpus_test):
    print('\nBuild Test data:')
    test_x, test_y = build_data(corpus_test,
                                char_to_n,
                                max_seq=MAX_SEQ,
                                stride=STRIDE)
else:
    test_x, test_y = None, None

# =============================================================================
# Save processed data
# =============================================================================
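Here build_data presumably windows the corpus into fixed-length character sequences. A hypothetical sketch of such a builder (assuming corpus is a single string and char_to_n maps characters to integer ids; the real helper may differ):

def build_data(corpus, char_to_n, max_seq, stride):
    # Slide a max_seq-wide window over the text with the given stride;
    # each window's target is the character that immediately follows it
    xs, ys = [], []
    for i in range(0, len(corpus) - max_seq, stride):
        xs.append([char_to_n[c] for c in corpus[i:i + max_seq]])
        ys.append(char_to_n[corpus[i + max_seq]])
    return xs, ys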
Example #20
#For reproducibility
torch.set_grad_enabled(False)
torch.manual_seed(0)
np.random.seed(0)

rounds = 10

input_units = 2
hidden_units = 25
output_units = 2

epochs = 50
mini_batch_size = 5

X_train, y_train = build_data(1000)  #(1000,2)
X_test, y_test = build_data(1000)  #(1000,2)

print(
    'Start training with parameters : {0} rounds, {1} epochs and {2} batch size'
    .format(rounds, epochs, mini_batch_size))

result_rounds = []  #training_losses, training_acc, test_losses, test_acc

time1 = time.perf_counter()
for i in range(rounds):

    print("Training round {0} : ".format(i + 1))
    model = Sequential(Linear(input_units, hidden_units), ReLU(),
                       Linear(hidden_units, hidden_units), ReLU(),
                       Linear(hidden_units, hidden_units), ReLU(),
                       Linear(hidden_units, output_units))
Example #21
from utils import build_data, vectorization
from model import build_model
from keras.callbacks import ModelCheckpoint
import io
import os

if __name__ == "__main__":
    text = io.open('Model_data/shakespear.txt',
                   encoding='utf-8').read().lower()

    Tx = 40
    chars = sorted(list(set(text)))
    char_indices = dict((c, i) for i, c in enumerate(chars))
    indices_char = dict((i, c) for i, c in enumerate(chars))

    print("Creating training set...")
    X, Y = build_data(text, Tx, stride=1)
    print("Vectorizing training set...")
    x, y = vectorization(X, Y, n_x=len(chars), char_indices=char_indices)

    model = build_model(x, y)
    filepath = os.path.join(os.getcwd(), "Model_checkpoints/model.h5")
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    model.fit(x, y, batch_size=128, epochs=20, callbacks=[checkpoint])
Example #22
def train(run, model_name, data_path, epochs, batch_size, mlflow_custom_log,
          log_as_onnx, log_as_tensorflow_lite, log_as_tensorflow_js):
    print("mlflow_custom_log:", mlflow_custom_log)
    x_train, _, y_train, _ = utils.build_data(data_path)

    ncols = x_train.shape[1]

    def baseline_model():
        model = Sequential()
        model.add(
            Dense(ncols,
                  input_dim=ncols,
                  kernel_initializer='normal',
                  activation='relu'))
        model.add(Dense(1, kernel_initializer='normal'))
        model.compile(loss='mean_squared_error', optimizer='adam')
        return model

    model = baseline_model()

    if mlflow_custom_log:
        print("Logging with mlflow.log")
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)
        mlflow.keras.log_model(model,
                               "tensorflow-model",
                               registered_model_name=model_name)
    else:
        utils.register_model(run, model_name)

    # MLflow - log as ONNX model
    if log_as_onnx:
        import onnx_utils
        mname = f"{model_name}_onnx" if model_name else None
        onnx_utils.log_model(model, "onnx-model", model_name=mname)

    # Save as TensorFlow Lite format
    if log_as_tensorflow_lite:
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        path = "model.tflite"
        with open(path, "wb") as f:
            f.write(tflite_model)
        mlflow.log_artifact(path, "tensorflow-lite-model")

    # Save as TensorFlow.js format
    if log_as_tensorflow_js:
        import tensorflowjs as tfjs
        path = "model.tfjs"
        tfjs.converters.save_keras_model(model, path)
        mlflow.log_artifact(path, "tensorflow-js-model")

    # Evaluate model
    estimator = KerasRegressor(build_fn=baseline_model,
                               epochs=epochs,
                               batch_size=batch_size,
                               verbose=0)
    kfold = KFold(n_splits=10)
    results = cross_val_score(estimator, x_train, y_train, cv=kfold)
    print(
        f"Baseline MSE: mean: {round(results.mean(),2)}  std: {round(results.std(),2)}"
    )
    if mlflow_custom_log:
        mlflow.log_metric("mse_mean", results.mean())
        mlflow.log_metric("mse_std", results.std())

    # Score
    data = x_train
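    # NOTE: `model` itself is never fit here (cross_val_score trains clones),
    # so these predictions come from the randomly initialized network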
    predictions = model.predict(data)
    predictions = pd.DataFrame(data=predictions, columns=["prediction"])
    print("predictions.shape:", predictions.shape)
    print("predictions:", predictions)
Example #23
from argparse import ArgumentParser
import pandas as pd
import mlflow
import mlflow.pyfunc
import utils

print("MLflow Version:", mlflow.__version__)
print("Tracking URI:", mlflow.tracking.get_tracking_uri())

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--model_uri",
                        dest="model_uri",
                        help="model_uri",
                        default="../../data/train/wine-quality-white.csv")
    args = parser.parse_args()
    print("Arguments:")
    for arg in vars(args):
        print(f"  {arg}: {getattr(args, arg)}")

    model = mlflow.pyfunc.load_model(args.model_uri)
    print("model:", model)

    _, _, ndarray, _ = utils.build_data()
    data = pd.DataFrame(ndarray)
    print("data.shape:", data.shape)

    predictions = model.predict(data)
    print("predictions.type:", type(predictions))
    print("predictions.shape:", predictions.shape)
    print("predictions:", predictions)
Example #24
def pocket_vs_pla(m, b, num, threshold, continuous_threshold):
    # build 2D data
    x, y = build_data(m, b, num)

    #initial weight w = (0, 0, 0)
    w = np.zeros([1, 3])

    # initial the figure
    plt_fig = plt_proc(
        x, num, title="HW1-2: Pocket v.s. PLA with 1000 2D data samples")

    # plot the sample line equation
    plt_fig.add_line(w=None,
                     m=m,
                     b=b,
                     num=num,
                     iteration=None,
                     label=f"Benchmark",
                     txt="")

    # set the start time of PLA algorithm
    PLA_start = time()

    # run PLA algorithm
    w_PLA, iteration_PLA = PLA(x, y, w, num)

    # get the execution time of PLA
    PLA_exe_time = time() - PLA_start

    PLA_error_rate = verification(x, y, w_PLA, num, iteration=None, show=False)

    # plot the PLA line equation
    plt_fig.add_line(
        w=w_PLA,
        m=None,
        b=None,
        num=num,
        iteration=iteration_PLA,
        label=f"PLA",
        txt=
        f"\n        error rate = {PLA_error_rate:.03f}, iteration = {iteration_PLA}, exec. time = {PLA_exe_time:.03f}"
    )

    # set the start time of the Pocket algorithm
    Pocket_start = time()

    # run Pocket algorithm
    w_Pocket, iteration_Pocket = Pocket(x, y, w, num, threshold,
                                        continuous_threshold)

    # get the execution time of Pocket
    Pocket_exe_time = time() - Pocket_start

    Pocket_error_rate = verification(x,
                                     y,
                                     w_Pocket,
                                     num,
                                     iteration_Pocket,
                                     show=False)

    # plot the Pocket line equation
    plt_fig.add_line(
        w=w_Pocket,
        m=None,
        b=None,
        num=num,
        iteration=iteration_Pocket,
        label=f"Pocket",
        txt=
        f"\n        error rate = {Pocket_error_rate:.03f}, iteration = {iteration_Pocket}, exec. time = {Pocket_exe_time:.03f}"
    )

    print(f"PLA execution time = {PLA_exe_time:.5f} seconds")
    print(f"PLA Iteration = {iteration_PLA}")
    print(f"PLA error rate = {PLA_error_rate}\n")

    print(f"Pocket execution time = {Pocket_exe_time:.5f} seconds")
    print(f"Pocket Iteration = {iteration_Pocket}")
    print(f"Pocket error rate = {Pocket_error_rate:.03f}")

    # save and show the figure
    plt_fig.save_and_show(itr_avg=None, filename='hw1-2.png', avg_show=False)
Example #25
def artifact_exists(run_id, path):
    return len(client.list_artifacts(run_id, path)) > 0
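# NOTE (assumption): `client` is presumably created at module level,
# e.g. client = mlflow.tracking.MlflowClient()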

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--run_id", dest="run_id", help="run_id", required=True)
    parser.add_argument("--data_path", dest="data_path", help="Data path", default="../../data/train/wine-quality-white.csv")
    parser.add_argument("--score_as_pyfunc", dest="score_as_pyfunc", help="Score as PyFunc", default=False, action='store_true')
    args = parser.parse_args()
    print("Arguments:")
    for arg in vars(args):
        print(f"  {arg}: {getattr(args, arg)}")
    run_id = args.run_id

    utils.dump(run_id)
    data, _, _, _ = utils.build_data(args.data_path)

    model_uri = f"runs:/{run_id}/keras-hd5-model"
    predict_keras(model_uri, data)
    if args.score_as_pyfunc:
        predict_pyfunc(model_uri, data)

    model_name = "onnx-model"
    if artifact_exists(run_id, model_name):
        model_uri = f"runs:/{run_id}/{model_name}"
        predict_onnx(model_uri, data)
        predict_pyfunc(model_uri, data)
    else:
        print(f"No model: {model_name}")

    model_name = "tensorflow-model"