def load_model_then_predict(model_path="./model/pytorch-mnist/"):
    """Score the first 8 MNIST test digits with a saved model and save the result.

    Loads the MNIST test split from ./data, flattens 8 images into 784-float
    rows, runs them through BuiltinScoreModule loaded from *model_path*, and
    writes the scored dataframe as parquet under ./testOutputParquet/.
    """
    batch_size = 64
    test_data = dsets.MNIST(root='./data',
                            train=False,
                            transform=transforms.ToTensor())
    test_gen = torch.utils.data.DataLoader(dataset=test_data,
                                           batch_size=batch_size,
                                           shuffle=False)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('device: ', device)

    # Take the first 8 digits and flatten each 28x28 image to a 784-float row;
    # the scoring module expects float input.
    sample = test_data.data[:8]
    flattened = sample.view(-1, 28 * 28).float()

    # Only a CPU tensor can be converted to numpy / a DataFrame.
    input_df = pd.DataFrame(flattened.cpu().numpy())

    params = {"Append score columns to output": "False"}
    module = BuiltinScoreModule(model_path, params)
    result = module.run(input_df)
    print('=====buildinScoreModule=======')
    print(result)
    ioutil.save_parquet(result, './testOutputParquet/')
# Example #2 (0)
def test(model_path):
    """Score prepared stargan input with the builtin model and inspect the output."""
    input_df = prepare_input()
    scored = test_builtin(model_path, input_df)
    ioutil.save_parquet(scored, "outputs/stargan/model_output", True)
    # Show what came back: the columns, the full frame, and the shape of column "0".
    print(scored.columns)
    print(scored)
    print(scored["0"].shape)
# Example #3 (0)
def run(input_path, output_path, remvoe_columns):
    """Remove the specified columns from the input parquet and save the result.

    NOTE(review): the parameter name is misspelled ("remvoe"), but renaming it
    would break keyword callers, so it is kept as-is.
    """
    processor = Process({"Remove Columns": remvoe_columns})
    frame = ioutil.read_parquet(input_path)
    ioutil.save_parquet(processor.run(frame), output_path)
# Example #4 (0)
def prepare_input():
    """Read the stargan output parquet, append a constant target column "c",
    save the augmented frame as model input, and return it.

    Every row receives the same 1x5 one-hot target ``[[0, 0, 0, 1, 0]]``.
    """
    df = ioutil.read_parquet("../dstest/outputs/stargan/")
    # Comprehension instead of an index-based append loop; one fresh list per
    # row so rows do not alias a single shared object.
    targets = [[[0, 0, 0, 1, 0]] for _ in range(len(df))]
    df.insert(len(df.columns), "c", targets, True)
    print(df.columns)
    ioutil.save_parquet(df, "outputs/stargan/model_input", True)
    return df
# Example #5 (0)
def run(input_path, meta_path, output_path, file_name, prob_col):
    """Run the Process step over the input parquet and save the result.

    The step is configured with a category file name (*file_name*) and the
    name of the probability column (*prob_col*); *meta_path* is forwarded to
    ``Process`` unchanged.
    """
    config = {
        "Category File Name": file_name,
        "Probability Column Name": prob_col,
    }
    processor = Process(meta_path, config)
    frame = ioutil.read_parquet(input_path)
    ioutil.save_parquet(processor.run(frame), output_path, True)
# Example #6 (0)
def run(input_path, output_path, image_column, target_column,
        target_datauri_column, target_image_size):
    """Read base64-encoded images from a parquet dataframe and transform them
    to the format required by the model input, saving the result as parquet.
    """
    preprocessor = PreProcess({
        "Image Column": image_column,
        "Target Column": target_column,
        "Target DataURI Column": target_datauri_column,
        "Target Image Size": target_image_size,
    })
    frame = ioutil.read_parquet(input_path)
    ioutil.save_parquet(preprocessor.run(frame), output_path)
# Example #7 (0)
def run(input_path, meta_path, output_path, file_name, prob_col,
        append_category_column_to_output):
    """Run the Process step over the input parquet, print and save the result.

    Configuration is passed through the module-level meta keys: the category
    file name, the probability column name, and whether to append the category
    column to the output.
    """
    config = {
        CATEGORY_FILE_NAME_KEY: file_name,
        PROBABILITY_COLUMN_NAME_KEY: prob_col,
        APPEND_CATEGORY_COLUMN_TO_OUTPUT_KEY: append_category_column_to_output,
    }
    processor = Process(meta_path, config)
    result = processor.run(ioutil.read_parquet(input_path))
    print(result)
    ioutil.save_parquet(result, output_path, True)
# Example #8 (0)
def run(input_path, output_path):
    """Read images from a folder, base64-encode each as a data URI, and save a
    (label, image) dataframe as parquet to *output_path*.

    The label is the last ``_``-separated token of the file's basename,
    without its extension (e.g. ``img_007_cat.png`` -> ``cat``).
    """
    import glob
    # NOTE(review): without recursive=True, glob treats '**' like '*', so this
    # only matches files directly inside input_path — confirm that is intended.
    patterns = ('**.jpg', '**.png')  # the tuple of file types
    files_grabbed = []
    for pattern in patterns:
        files_grabbed.extend(glob.glob(os.path.join(input_path, pattern)))

    print(f"Got {len(files_grabbed)} files in folder {input_path}")
    print(files_grabbed)

    df = pd.DataFrame(columns=["label", "image"])
    # enumerate instead of range(len(...)): index and filename in one step.
    for row, filename in enumerate(files_grabbed):
        label = os.path.splitext(os.path.basename(filename))[0].split('_')[-1]
        image_64_encode = datauri_util.imgfile_to_datauri(filename)
        df.loc[row] = label, image_64_encode

    ioutil.save_parquet(df, output_path, True)