Example #1
import os
import sys
import time


def main():
    if len(sys.argv) < 4:
        print('Usage: %s {padding|sum} epochs learning_rate' % sys.argv[0])
        sys.exit(1)
    MODE = sys.argv[1]
    EPOCHS = int(sys.argv[2])
    LEARNING_RATE = float(sys.argv[3])

    data_pth = '../data/mode_%s.npz' % MODE
    model_name = '%s_mode_%s_epochs_%s_lr_%s_%s' % (
        sys.argv[0].split('/')[-1][:-3], MODE, EPOCHS, LEARNING_RATE,
        time.strftime("%Y.%m.%d.%H.%M.%S", time.localtime()))
    modeldir = '../model/Conv1D/%s' % model_name
    os.makedirs(modeldir, exist_ok=True)

    #
    # load data
    #
    x_train, y_train, x_test, y_test, class_weights_dict = _data(
        data_pth, split_val=False, verbose=1)

    #
    # train and save
    #
    if USEGPU:
        config_tf(user_mem=2500, cuda_rate=0.2)
    model, history_dict = TrainConv1D(x_train,
                                      y_train,
                                      class_weights_dict=class_weights_dict,
                                      epochs=EPOCHS,
                                      lr=LEARNING_RATE)
    net_saver(model, modeldir, history_dict)

    #
    # test
    #
    y_test, p_pred, y_real, y_pred = net_predictor(modeldir,
                                                   x_test,
                                                   y_test,
                                                   Onsave=True)
    acc, f1, mcc, recalls, precisions, f1s, mccs = test_score(y_real=y_real,
                                                              y_pred=y_pred,
                                                              classes=10)
    print('\nacc: %s'
          '\nf1: %s'
          '\nmcc: %s'
          '\nrecalls: %s'
          '\nprecisions: %s'
          '\nf1s: %s'
          '\nmccs: %s' % (acc, f1, mcc, recalls, precisions, f1s, mccs))
    print('\nThe Hypers are: mode_%s_epochs_%s_lr_%s' %
          (MODE, EPOCHS, LEARNING_RATE))
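
main() reads its three hyperparameters straight from sys.argv, so the script would normally end with the usual entry-point guard. A minimal sketch; the script name train_conv1d.py in the comment is hypothetical:

if __name__ == '__main__':
    # e.g.: python train_conv1d.py padding 200 0.001
    main()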
Example #2
def main():
    lookback = 600  # look back over the past 600 points, sampled down to 100 (600/6 = 100)
    step = 6  # sampling period: one observation every 6 seconds
    delay = 6  # predict the value 6 seconds into the future
    batch_size = 128
    train_num = 20000
    val_num = 5000
    test_num = None  # unused: the test split takes everything after train + val
    modeldir = '../model'
    #
    # prepare data
    #
    float_data = parse_data()
    train_gen, _ = gen_data(float_data,
                            lookback,
                            step,
                            delay,
                            batch_size,
                            min_idx=0,
                            max_idx=train_num + 1)
    val_gen, val_steps = gen_data(float_data,
                                  lookback,
                                  step,
                                  delay,
                                  batch_size,
                                  min_idx=train_num + 1,
                                  max_idx=train_num + val_num + 1)
    test_gen, _ = gen_data(float_data,
                           lookback,
                           step,
                           delay,
                           batch_size=5000,
                           min_idx=train_num + val_num + 1,
                           max_idx=len(float_data) - delay - 1)
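    # batch_size=5000 yields one oversized batch, so the single next() call
    # below materializes the whole test split at once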
    x_test, y_test = next(test_gen)

    #
    # train and save
    #
    if USEGPU:
        config_tf(user_mem=2500, cuda_rate=0.2)
    model, history_dict = GRU(train_gen, val_gen, val_steps, input_dim=4)
    net_saver(model, modeldir, history_dict)

    #
    # test
    #
    y_real, y_pred = net_predictor(modeldir, x_test, y_test, Onsave=True)

    print('\n y_real: %s' '\n y_pred: %s' % (y_real, y_pred))
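
With lookback=600 and step=6, every generated sample spans 600 // 6 = 100 timesteps, and input_dim=4 implies 4 features per step. A quick sanity check of the batch shape the generators are expected to yield (the feature count 4 mirrors input_dim above):

lookback, step, batch_size = 600, 6, 128
timesteps = lookback // step
print((batch_size, timesteps, 4))  # expected training batch shape: (128, 100, 4)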
Example #3
def grid_search():
    mode_lst = ['padding', 'sum']
    epochs_lst = [200]  # a generous cap; early stopping ends training sooner
    learning_rate_lst = [0.1, 0.01, 0.001, 0.0001, 0.00001]

    verbose = 0

    hyper_tag_lst = []
    acc_lst = []
    for mode in mode_lst:
        for epochs in epochs_lst:
            for learning_rate in learning_rate_lst:
                data_pth = '../data/mode_%s.npz' % mode
                x_train, y_train, x_test, y_test, x_val, y_val, class_weights_dict = _data(
                    data_pth, split_val=True, verbose=verbose)
                if USEGPU:
                    config_tf(user_mem=2500, cuda_rate=0.2)
                model, history_dict = TrainConv1D(x_train,
                                                  y_train,
                                                  x_val,
                                                  y_val,
                                                  class_weights_dict,
                                                  filepth=None,
                                                  epochs=epochs,
                                                  lr=learning_rate,
                                                  verbose=verbose)
                acc = history_dict['val_acc'][-1]
                early_epochs = len(history_dict['val_acc'])  # epochs actually run before early stopping
                hyper_tag_lst.append('%s_%s_%s' %
                                     (mode, early_epochs, learning_rate))
                print('\nmode_%s_epochs_%s_lr_%s' %
                      (mode, epochs, learning_rate))
                acc_lst.append(acc)
                print('val_acc:', acc)
                # sys.stdout.flush()  # or run with "python -u"
    print('\nThe Best Hypers are: %s, Best val_acc is: %s' %
          (hyper_tag_lst[acc_lst.index(max(acc_lst))], max(acc_lst)))
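
Note that grid_search scores each run by the last logged val_acc, i.e. the value at the early-stopping epoch. If the callback does not restore the best weights, the peak can be higher; a minimal alternative, assuming history_dict['val_acc'] is a plain list of floats:

best_acc = max(history_dict['val_acc'])
best_epoch = history_dict['val_acc'].index(best_acc) + 1  # 1-based epoch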
Example #4
import argparse
import os

import utils


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument(...,  # earlier arguments truncated in the source
                        default='')

    parser.add_argument('-mo',
                        '--model_output',
                        help='Where to save the trained model?',
                        default='/work/fcn_models/')

    parser.add_argument('-id', '--exp_id', help='Experiment id', default='')

    return parser.parse_args()


######################################################################################

args = parse_args()
utils.config_tf()
exp_id_file = args.model_output + 'Exp_ID.csv'

# create experimental directory
if args.exp_id == '':
    # use date + number of exps so far today
    today, exp_id = utils.get_exp_id(exp_id_file)
    model_output_dir = args.model_output + str(today) + '_' + str(
        exp_id).zfill(2)
else:
    model_output_dir = args.model_output + args.exp_id

if not os.path.exists(model_output_dir):
    os.makedirs(model_output_dir)

# set vars
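
utils.get_exp_id is project-local and not shown in this snippet. Below is a minimal sketch of the contract the code above relies on: it returns today's date plus a per-day experiment counter, persisted in Exp_ID.csv. The CSV layout (one "date,id" row per experiment) is an assumption:

import csv
import datetime
import os


def get_exp_id(exp_id_file):
    # hypothetical sketch of the project helper: date string plus a
    # counter of experiments already logged today
    today = datetime.date.today().strftime('%Y%m%d')
    rows = []
    if os.path.exists(exp_id_file):
        with open(exp_id_file) as f:
            rows = [r for r in csv.reader(f) if r]
    exp_id = sum(1 for r in rows if r[0] == today)
    with open(exp_id_file, 'a', newline='') as f:
        csv.writer(f).writerow([today, exp_id])
    return today, exp_id

Note also that exp_id_file = args.model_output + 'Exp_ID.csv' depends on the trailing slash in the default path; os.path.join(args.model_output, 'Exp_ID.csv') would remove that dependency.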
Example #5
import argparse
import os


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--list_path',
        type=str,
        required=True,
        help=
        '[str], list file path for train trajectories, each row in the list file represents a trajectory'
    )
    parser.add_argument(
        '--valid_ratio',
        type=float,
        default=0.2,
        help=
        '[float], split off @valid_ratio of train_data as the validation set, default value is "0.2"'
    )
    parser.add_argument(
        '--lookback',
        type=int,
        default=600,
        help=
        '[int], how far back the historical data is seen, default value is "600 (seconds)"'
    )
    parser.add_argument(
        '--step',
        type=int,
        default=6,
        help='[int], sampling interval, default value is "6 (seconds)"')
    parser.add_argument(
        '--delay',
        type=int,
        default=6,
        help=
        '[int], predict the data @delay seconds ahead, default value is "6 (seconds)"'
    )
    parser.add_argument(
        '--model_name',
        type=str,
        default='model',
        help='[str], the name of your trained model, default value is "model"')
    parser.add_argument(
        '--batch_size',
        type=int,
        default=128,
        help='[int], training batch_size, default value is "128"')
    parser.add_argument('--epochs',
                        type=int,
                        default=50,
                        help='[int], training epochs, default value is "50"')
    parser.add_argument(
        '--gpu_mem',
        type=int,
        default=0,
        help=
        '[int], GPU memory to use in MiB; 0 disables GPU configuration, default value is "0 (MiB)"')
    args = parser.parse_args()

    list_path = args.list_path
    valid_ratio = args.valid_ratio
    lookback = args.lookback  # consider 100 of the past 600 points (600/6 = 100) [lookback=600, step=6]
    step = args.step  # sampling period (seconds)
    delay = args.delay  # predict the data 6 seconds ahead [delay=6]
    model_name = args.model_name
    batch_size = args.batch_size
    epochs = args.epochs
    gpu_mem = args.gpu_mem

    modeldir = '../model'
    os.makedirs(modeldir, exist_ok=True)

    #
    # prepare data
    #
    trajectory_dict = parse_data(list_path)
    normalized_trajectory_dict = normalization(trajectory_dict,
                                               valid_ratio=valid_ratio)

    #
    # train and save
    #
    if gpu_mem > 0:
        config_tf(user_mem=gpu_mem)

    for k, v in normalized_trajectory_dict.items():
        train_num = round(v.shape[0] * (1 - valid_ratio))
        train_gen = generator(data=v,
                              lookback=lookback,
                              delay=delay,
                              step=step,
                              min_index=0,
                              max_index=train_num + 1,
                              shuffle=False,
                              batch_size=batch_size)
        steps_per_epoch = (train_num - lookback) // batch_size

        valid_gen = generator(data=v,
                              lookback=lookback,
                              delay=delay,
                              step=step,
                              min_index=train_num + 1,
                              max_index=v.shape[0] - delay - 1,
                              shuffle=False,
                              batch_size=batch_size)
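        # the validation samples run from index train_num + 1 up to
        # len(v) - delay - 1; after reserving lookback points for the first
        # window, this is the number of full batches the generator can serve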
        val_steps = (len(v) - delay - 1 - train_num - 1 - 1 -
                     lookback) // batch_size

        model, history_dict = RNN(train_gen,
                                  valid_gen,
                                  val_steps,
                                  input_dim=v.shape[1],
                                  epochs=epochs,
                                  steps_per_epoch=steps_per_epoch)

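    # note: only the model trained on the last trajectory in the loop is saved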
    net_saver(model, modeldir, model_name, history_dict)
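
The generator itself is defined elsewhere in the project. For reference, a minimal sketch consistent with the call sites above, in the style of the well-known Keras temperature-forecasting generator; which column of the data holds the prediction target is an assumption:

import numpy as np


def generator(data, lookback, delay, step, min_index, max_index,
              shuffle=False, batch_size=128):
    # sketch only: yields (samples, targets) batches of shape
    # (batch, lookback // step, n_features) and (batch,)
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while True:
        if shuffle:
            rows = np.random.randint(min_index + lookback, max_index,
                                     size=batch_size)
        else:
            if i + batch_size >= max_index:
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows), lookback // step, data.shape[-1]))
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            indices = range(row - lookback, row, step)
            samples[j] = data[indices]
            targets[j] = data[row + delay][0]  # assumed target column
        yield samples, targets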