Example #1
def train_model():
    # Divide up into cars and notcars
    # images = glob.iglob('*.jpeg', recursive=True)
    cars = glob.iglob(os.path.join(DATA_PATH, "vehicles", "**", "*.png"),
                      recursive=True)
    notcars = glob.iglob(os.path.join(DATA_PATH, "non-vehicles", "**",
                                      "*.png"),
                         recursive=True)
    # for image in images:
    #     if 'image' in image or 'extra' in image:
    #         notcars.append(image)
    #     else:
    #         cars.append(image)

    with log_time("extract car features"):
        car_features = Parallel(n_jobs=-1, max_nbytes=None, verbose=5)(
            delayed(extract_feature_from_path)(img_path) for img_path in cars)
    with log_time("extract noncar features"):
        notcar_features = Parallel(n_jobs=-1, max_nbytes=None, verbose=5)(
            delayed(extract_feature_from_path)(img_path)
            for img_path in notcars)

    # Create an array stack of feature vectors
    X = np.vstack((car_features, notcar_features)).astype(np.float64)
    # Fit a per-column scaler
    X_scaler = StandardScaler().fit(X)
    # Apply the scaler to X
    scaled_X = X_scaler.transform(X)

    # Define the labels vector
    y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))

    # Split up data into randomized training and test sets
    rand_state = np.random.randint(0, 100)
    X_train, X_test, y_train, y_test = train_test_split(
        scaled_X, y, test_size=0.2, random_state=rand_state)

    print('Using:', orient, 'orientations', pix_per_cell,
          'pixels per cell and', cell_per_block, 'cells per block')
    print('Feature vector length:', len(X_train[0]))
    # Use a linear SVC
    svc = LinearSVC()
    # Check the training time for the SVC
    with log_time("train"):
        svc.fit(X_train, y_train)
    # Check the score of the SVC
    print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
    # Check the prediction time for a single sample
    n_predict = 10
    with log_time("evaluate", n_predict, "items"):
        predicts = svc.predict(X_test[0:n_predict])
    print('My SVC predicts: ', predicts)
    print('For these', n_predict, 'labels: ', y_test[0:n_predict])

    with open(MODEL_PATH, "wb") as f:
        pickle.dump([svc, X_scaler], f)
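
In this example log_time is used as a context manager that reports how long the wrapped block (feature extraction, training, prediction) took. The helper itself is not shown on this page; a minimal sketch of a compatible implementation, where the signature and message format are assumptions rather than the original code, could look like this:

import logging
import time
from contextlib import contextmanager

@contextmanager
def log_time(label, *extra):
    # Assumed helper: time the body of the `with` block and log the result.
    start = time.time()
    try:
        yield
    finally:
        elapsed = time.time() - start
        suffix = " ".join(str(e) for e in extra)
        logging.info("%s %s took %.2fs", label, suffix, elapsed)
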
Example #2
        logging.info('Model: {}'.format(model_ckpt))
        torch.save(clf.state_dict(), model_ckpt)


if __name__ == '__main__':
    model = 'check_train'
    args = parse()
    model = '{}_{}{}_optim_{}_ei_{}_epochs_{}'.format(model, args.arch,
                                                      args.num_layers,
                                                      args.optimizer,
                                                      len(args.lr_clfs),
                                                      args.n_epochs)
    pr_time, fl_time = time_stp()
    logger(args.expt, model)

    log_time('Start', pr_time)
    sep()
    logging.info(json.dumps(args.__dict__, indent=2))

    main(
        expt=args.expt,
        model_name=model,
        device=args.device,
        gpu_id=args.gpu_id,
        optimizer=args.optimizer,
        arch=args.arch,
        num_layers=args.num_layers,
        n_classes=args.n_classes,
        img_size=args.img_size,
        batch_size=args.batch_size,
        test_batch_size=args.test_batch_size,
Example #3
    choices=['kddcup', 'kddcup_neptune', 'nsl_kdd', 'nb15', 'gureKDD']
)

parser.add_argument(
    '--num_runs',
    type=int,
    default=10,
    help='Number of runs'
)

args = parser.parse_args()
DATA_SET = args.DATA_SET
num_runs = args.num_runs
LOG_FILE = 'log_results_{}.txt'.format(DATA_SET)
LOGGER = utils.get_logger(LOG_FILE)
utils.log_time(LOGGER)
LOGGER.info(DATA_SET)
results = []
for n in range(1, num_runs + 1):
    mean_aupr, std = execute_run(DATA_SET)
    results.append(mean_aupr)
    LOGGER.info(' Run {}: Mean: {:.4f} | Std {:.4f}'.format(n, mean_aupr, std))
mean_all_runs = np.mean(results)
print('Mean AuPR over {} runs {:.4f}'.format(num_runs, mean_all_runs))
print('Details: ', results)

LOGGER.info('Mean AuPR over {} runs {:.4f} Std {:.4f}'.format(
    num_runs, mean_all_runs, np.std(results)))
LOGGER.info(' Details ' + str(results))
utils.close_logger(LOGGER)
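
In this example log_time is a plain function from the project's utils module that stamps the start time into the given logger rather than timing a block. A rough sketch of such helpers, with names taken from the example but bodies that are only assumptions, might be:

import logging
from datetime import datetime

def get_logger(log_file):
    # Assumed helper: file-backed logger used by the run loop above.
    logger = logging.getLogger(log_file)
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.FileHandler(log_file))
    return logger

def log_time(logger):
    # Assumed helper: record when the run started.
    logger.info('Time: {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))

def close_logger(logger):
    # Assumed helper: flush and detach handlers when the run is done.
    for handler in list(logger.handlers):
        handler.close()
        logger.removeHandler(handler)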

Example #4
    print('Start Session')
    sess.run(init)
    sess.run(local_init)
    writer = tf.summary.FileWriter(
        os.path.join(FLAGS.checkpointDir,
                     'tensorboard/' + FLAGS.exp + '/train/'), sess.graph)
    twriter = tf.summary.FileWriter(
        os.path.join(FLAGS.checkpointDir,
                     'tensorboard/' + FLAGS.exp + '/test/'), sess.graph)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    step = 0
    try:
        print('Start Train')
        while not coord.should_stop():
            with utils.log_time() as log:
                for i in range(2):
                    (Xone, X1, X2, Rcate_list, final_out, fc2, loss_v, X,
                     X_last, label_oh, pred_logit, positive_score,
                     pred_binary, label_value, summary, _, step) = sess.run(
                        [
                            m.Xone, m.X1, m.X2, m.Rcate_list, m.final_out,
                            m.fc2, m.loss, m.X, m.X_last, m.label_oh,
                            m.pred_logit, m.positive_score, m.pred_binary,
                            m.label_value, m.summary_op, m.optimizer,
                            m.global_step
                        ],
                        options=run_options,
                        run_metadata=run_metadata)
                log.write(u'iteration: %d' % step)
                print('loss:', loss_v)
                writer.add_summary(summary, step)