Example 1
def main():
    # wrap the test loader in a tqdm progress bar
    tbar_2 = tqdm.tqdm(TestDataLoader, ncols=120)
    predict = []
    tbar_2.set_description('Testing: ')
    for batch_idx, x in enumerate(tbar_2):
        pred = test(x)
        predict.append(pred)
    # dump all collected predictions to file
    util.write_result('test_predicted.txt', predict)
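The `util.write_result` helper is project-specific and not shown on this page. A minimal sketch of a compatible implementation, assuming one prediction per output line (only the signature comes from the call above; the body is an assumption):

def write_result(path, predictions):
    # hypothetical sketch: the real project helper may format lines differently
    with open(path, 'w') as f:
        for pred in predictions:
            f.write('{}\n'.format(pred))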
Example 2
File: main3.py Project: zengxyu/Mao
def train_test_all():
    preds_test_on_all_models = []
    preds_train_on_all_models = []

    model_val_loss_dict = {}
    weights = []

    for i, [
            prefix_name, ModelHelper, Model, special_param, PreprocessHelper,
            preprocess_param, weight
    ] in enumerate(model_config):
        print_seperater(prefix_name, Model, special_param)
        param['output'] = os.path.join(
            param['output_root_dir'],
            'model_save/output_{}'.format(prefix_name))
        param['model'] = Model

        param.update(special_param)

        param['out_data_dir'] = os.path.join(
            param['output_root_dir'],
            "out_data_dir_" + PreprocessHelper.__name__)

        weights.append(weight)
        total_train_preds, total_test_preds = train_test_once(
            ModelHelper, PreprocessHelper, preprocess_param)

        # write the train-set predictions to file
        overall_val_loss = write_val_result(total_train_preds,
                                            root_dir=param['root_dir'],
                                            output_dir=param['output'])
        model_val_loss_dict[prefix_name] = [overall_val_loss, weight]

        # append the predictions from each run to the lists
        preds_test_on_all_models.append(total_test_preds)
        preds_train_on_all_models.append(total_train_preds)

    print("preds after repeating -- shape:",
          np.shape(preds_test_on_all_models))
    # weighted average across models

    preds_test_on_all_models = np.average(np.array(preds_test_on_all_models),
                                          axis=0,
                                          weights=weights)
    preds_train_on_all_models = np.average(np.array(preds_train_on_all_models),
                                           axis=0,
                                           weights=weights)
    # write the test predictions to file
    write_result(preds_test_on_all_models, root_dir=param['root_dir'])
    print("End -- model_val_loss_dict:", model_val_loss_dict)
    # write the train (validation) predictions to file
    overall_val_loss = write_val_result(preds_train_on_all_models,
                                        root_dir=param['root_dir'])
    model_val_loss_dict["overall"] = overall_val_loss
    # dump all validation losses to file
    with open("val_loss002.pkl", 'wb') as f:
        pickle.dump(model_val_loss_dict, f)
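The ensembling step above hinges on `np.average` with `axis=0` and per-model `weights`: the per-model prediction arrays are stacked along a new leading axis and collapsed into a weighted mean. A self-contained illustration (the numbers are made up):

import numpy as np

# two models' predictions for three samples, weighted 0.7 / 0.3
preds = np.array([[0.2, 0.8, 0.5],   # model A
                  [0.4, 0.6, 0.9]])  # model B
ensembled = np.average(preds, axis=0, weights=[0.7, 0.3])
print(ensembled)  # [0.26 0.74 0.62]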
Example 3
def valid_and_anneal(dataSource):
    global minValMape
    global lr
    valMape, R2 = test(dataSource)
    if sum(valMape) < minValMape:
        # new best validation MAPE: persist metrics and checkpoint the model
        minValMape = sum(valMape)
        print('writing result and saving model...')
        util.write_result(valMape, R2, path=args.resultPath)
        with open(args.save, 'wb') as f:
            torch.save((model_.state_dict(), hidden, args), f)
    else:
        # no improvement: anneal the learning rate
        lr /= 4.0
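Note that `valid_and_anneal` only updates the global `lr`; in PyTorch the new rate still has to be written into the optimizer's parameter groups to take effect. A sketch of the step the surrounding training loop presumably performs (the `optimizer` name is an assumption):

# assumed follow-up in the training loop: push the annealed lr into the optimizer
for group in optimizer.param_groups:
    group['lr'] = lr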
Example 4
def main():
    global args
    args = parser.parse_args()

    # fix random seeds
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # load model
    # model = nn.DataParallel(Resnet18())
    # model.load_state_dict(torch.load(path))
    # model = model.module
    # cudnn.benchmark = True

    # net = MobileNetV1(num_classes=100,sobel=True)
    # model = load_model()
    # model.top_layer = nn.Linear(1000, 100)
    # print(model)
    # model.cuda()
    # cudnn.benchmark = True
    model = load_model()
    model.top_layer = None
    model.classifier = nn.Linear(1024, 10)
    model.cuda()
    cudnn.benchmark = True

    # freeze the feature layers so only the new classifier is trained
    for param in model.features.parameters():
        param.requires_grad = False

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                     std=[0.24703223, 0.24348512, 0.26158784])

    transformations_val = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(), normalize
    ]

    transformations_train = [
        transforms.Resize(256),
        transforms.CenterCrop(256),
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ]

    train_dataset = datasets.ImageFolder(
        r'./dataset/train',
        transform=transforms.Compose(transformations_train))

    val_dataset = datasets.ImageFolder(
        r'./dataset/test', transform=transforms.Compose(transformations_val))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size // 2,
                                             shuffle=False,
                                             num_workers=args.workers)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=10**args.weight_decay)

    model_name = model.__class__.__name__
    with open('./experiment_record(first)/' + model_name + '/result.txt',
              "w") as f:
        f.write("开始实验\n")  # 自带文件关闭功能,不需要再写f.close()

    # reglog = RegLog(args.conv, len(train_dataset.classes)).cuda()
    # optimizer = torch.optim.SGD(
    #     filter(lambda x: x.requires_grad, reglog.parameters()),
    #     args.lr,
    #     momentum=args.momentum,
    #     weight_decay=10**args.weight_decay
    # )

    # create logs
    # exp_log = os.path.join(args.exp, 'log')
    # if not os.path.isdir(exp_log):
    #     os.makedirs(exp_log)
    result = train(args.epochs, train_loader, model, criterion, optimizer,
                   val_loader)
    write_result(model_name, args.epochs, args.batch_size, args.workers,
                 args.lr, result, args.weight_decay)
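Since `model.features` is frozen, only the new `classifier` layer receives gradients; SGD silently skips parameters whose `.grad` stays `None`. An equivalent, more explicit variant (mirroring the commented-out RegLog optimizer above) hands the optimizer only the trainable parameters:

# optional variant: optimize only the unfrozen parameters
optimizer = torch.optim.SGD(
    filter(lambda p: p.requires_grad, model.parameters()),
    args.lr,
    momentum=args.momentum,
    weight_decay=10**args.weight_decay)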
Example 5
    # push the configured number of trees into the forest
    for i in range(PARAMS["forestSize"]):
        forest.append(tree())
    # build an RDD from the forest list
    rdd = sc.parallelize(forest)
    # run train_tree on every tree of the forest in parallel
    forest = rdd.map(train_tree_of_forest).collect()
    log("log;forest", "Finish training forest")
    # prediction phase
    # read the prediction data
    log("log;data", "Start to read predict data!")
    predictRecords = get_data(predictDataPath)
    log("log;data", "Amount of predict records: " + str(len(predictRecords)))
    log("log;data", "Finish reading predict data!")
    # list for collecting the prediction results
    result = []
    # iterate over the prediction data
    log("log;forest", "Start to predict!")
    # convert the prediction data set into an RDD
    rdd = sc.parallelize(predictRecords)
    # broadcast the forest as a shared read-only variable
    shared_forest = sc.broadcast(forest)
    # predict every record of the prediction data
    result = rdd.map(predict_tree_of_forest).collect()
    log("log;forest", "Finish predicting!")
    # write the predictions to the result file
    log("log;data", "Start to write the predict result!")
    write_result(resultPath, result)
    log("log;data", "Finish writing the predict result!")
    log("log;prog", "Terminal!")
Example 6
    rsi_overbought_bounds = [50]  # 3
    ema_values = [7, 42]  # 4
    targets = [600]  # 4
    stops = [300]  # 2
    overlaps = [True]  # 1

    # lots = int(input("Enter number of lots per signal\n"))
    # max_lots = int(input("Enter number of maximum lots open at any given time\n"))

    # lots, max_lots = 1, 1
    lots = 1
    max_lots = range(5, 20)

    for m_lot in max_lots:
        results = ohlc_backtest(bid,
                                ask,
                                rsi_windows,
                                rsi_oversold_bounds,
                                rsi_overbought_bounds,
                                ema_values,
                                targets,
                                stops,
                                overlaps,
                                lots,
                                m_lot,
                                filename_parent="new-test-1min")

        print(results)

        write_result(results, file="Finalresults-1min - {}.csv".format(m_lot))
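`write_result` here takes a `file=` keyword and a per-`m_lot` CSV name. A minimal sketch of a compatible CSV writer, assuming `results` is an iterable of rows (only the signature comes from the call above; the body is an assumption):

import csv

def write_result(results, file):
    # hypothetical sketch: the project's column layout may differ
    with open(file, 'w', newline='') as f:
        writer = csv.writer(f)
        for row in results:
            writer.writerow(row)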
Example 7
def main():
    try:
        pe = ParseExcel()
        pe.load_workbook(test_case_file_path)
        case_sheet_obj = pe.get_sheet_by_name("测试用例")  # the "test cases" sheet
        # get total case number
        total_case_num = pe.get_end_row(case_sheet_obj) - 1
        to_execute_case_col_obj = pe.get_column_obj(case_sheet_obj,
                                                    case_to_execute_col)
        # number of cases that need to be executed
        to_execute_case_num = 0
        # number of successful cases
        success_case_num = 0
        for idx, to_execute_case_cell in enumerate(to_execute_case_col_obj[1:],
                                                   2):
            # [1:] skips the header row; list indices start at 0 while the
            # matching Excel rows start at 2
            case_name = pe.get_cell_value(case_sheet_obj,
                                          row_no=idx,
                                          col_no=case_name_col)
            info("case_name: %s" % case_name)
            if (to_execute_case_cell.value or "").lower() == "y":
                # "y" means execute: bump the to-execute counter
                to_execute_case_num += 1
                case_row_obj = pe.get_row_obj(case_sheet_obj, idx)
                # case_row_obj type is list, starts with 0, so need to -1
                case_framework = case_row_obj[case_framework_col - 1].value
                case_step_sheet_name = case_row_obj[case_step_sheet_name_col -
                                                    1].value
                info("case_framework: %s, case_step_sheet_name: %s" %
                     (case_framework, case_step_sheet_name))

                if case_framework == "关键字框架":  # keyword-driven framework
                    info("****************call keyword****************")
                    # get step sheet object via sheet name, all the steps must be executed
                    step_sheet_obj = pe.get_sheet_by_name(case_step_sheet_name)
                    # get total step number: max_row - 1
                    total_step_num = pe.get_end_row(step_sheet_obj) - 1
                    # number of successful steps
                    success_step_num = 0

                    for step_idx in range(2, total_step_num + 2):
                        # the first row is the title row, so steps start at row 2
                        step_row_obj = pe.get_row_obj(step_sheet_obj, step_idx)
                        # get step description
                        step_description = step_row_obj[step_description_col -
                                                        1].value
                        # get function name
                        step_keyword_function = step_row_obj[
                            step_keyword_function_col - 1].value
                        # get locate type
                        step_locate_type = step_row_obj[step_locate_type_col -
                                                        1].value
                        # get locate expression
                        step_locate_exp = step_row_obj[step_locate_exp_col -
                                                       1].value
                        # get parameters
                        step_operate_data = step_row_obj[step_operate_value_col
                                                         - 1].value
                        info(
                            "step_description: %s, step_keyword_function: %s, step_locate_type: %s, "
                            "step_locate_exp: %s, step_operate_data: %s" %
                            (step_description, step_keyword_function,
                             step_locate_type, step_locate_exp,
                             step_operate_data))

                        if isinstance(step_operate_data, int):
                            # if the operate data is an int, convert it to str
                            step_operate_data = str(step_operate_data)

                        # assemble the command string and execute it
                        if step_keyword_function and step_locate_type and step_locate_exp and step_operate_data:
                            command = step_keyword_function + "('%s', '%s', '%s')" % (
                                step_locate_type, step_locate_exp,
                                step_operate_data)
                        elif step_keyword_function and step_locate_type and step_locate_exp:
                            command = step_keyword_function + "('%s', '%s')" % (
                                step_locate_type,
                                step_locate_exp,
                            )
                        elif step_keyword_function and step_operate_data:
                            command = step_keyword_function + "('%s')" % step_operate_data
                        elif step_keyword_function:
                            command = step_keyword_function + "()"
                        else:
                            # no keyword function given: skip this step
                            continue

                        try:
                            info(command)
                            eval(command)
                            success_step_num += 1
                            write_result(pe, step_sheet_obj, "Pass", step_idx,
                                         "test_step")
                            info("step [%s] pass" % step_description)
                        except Exception as e:
                            err_pic_path = screen_capture()
                            err_msg = traceback.format_exc()
                            write_result(pe, step_sheet_obj, "Fail", step_idx,
                                         "test_step", err_msg, err_pic_path)
                            info("step [%s] fail, error msg: %s" %
                                 (step_description, err_msg))

                    # per-case result statistics; note the indentation:
                    # this runs once per case, after the step loop
                    if success_step_num == total_step_num:
                        success_case_num += 1
                        write_result(pe, case_sheet_obj, "Pass", idx,
                                     "test_case")
                        info("case [%s] pass" % case_name)
                    else:
                        write_result(pe, case_sheet_obj, "Fail", idx,
                                     "test_case")
                        info("case [%s] fail" % case_name)

                elif case_framework == "数据":
                    # 可扩展
                    info("****************call data****************")
                    pass

                else:
                    write_result(pe, case_sheet_obj, "", idx, "test_case")
                    info("case [%s] uses an unknown framework, skipped" %
                         case_name)

            else:
                # no need to execute: clear the time and result columns
                write_result(pe, case_sheet_obj, "", idx, "test_case")

        info("total case number: %s, to execute: %s, passed: %s" %
             (total_case_num, to_execute_case_num, success_case_num))

    except Exception:
        raise
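Building command strings and running them through `eval` works, but it breaks on quotes inside the cell data and leaves `command` unbound when no branch matches. A registry of callables is a safer way to express the same keyword dispatch; the keyword functions below are hypothetical stand-ins, not the project's real keywords:

# hypothetical registry-based dispatch, an alternative to the eval() call
def open_url(url):
    print("opening", url)

def input_text(locate_type, locate_exp, value):
    print("typing %r into %s=%s" % (value, locate_type, locate_exp))

KEYWORDS = {"open_url": open_url, "input_text": input_text}

def run_step(keyword, *args):
    func = KEYWORDS[keyword]  # a KeyError flags an unknown keyword
    return func(*[a for a in args if a is not None])

run_step("input_text", "id", "kw", "hello")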