Example #1
File: main.py Project: ratosh/bichano
def train(args):
    yaml_file = yaml.safe_load(args.cfg)
    print(yaml.dump(yaml_file, default_flow_style=False))
    cfg = TrainingConfig(yaml_file)

    output_dir = cfg.output
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    chunks = get_all_chunks(cfg.input)
    print("Found {} files".format(len(chunks)))
    num_train_chunks = int(len(chunks) * cfg.train_ratio)
    training_chunks = chunks[:num_train_chunks]
    test_chunks = chunks[num_train_chunks:]
    print("Chunks Training({}) Testing({})".format(len(training_chunks),
                                                   len(test_chunks)))
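    # Build batch loaders for each split and hand them to the training worker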
    train_loader = BatchLoader(training_chunks, cfg)
    test_loader = BatchLoader(test_chunks, cfg)
    worker = TensorWorker(cfg, train_loader, test_loader)
    print()
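
Usage note (not from the project source): a minimal sketch of how train() might be wired to the command line. The --cfg flag and the argparse.FileType handling below are assumptions, inferred only from yaml.safe_load(args.cfg) accepting an open file object.

import argparse

# Hypothetical entry point; only args.cfg is used by the excerpt above.
parser = argparse.ArgumentParser()
parser.add_argument("--cfg", type=argparse.FileType("r"), required=True)
train(parser.parse_args())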
Example #2
        t_class = tf.squeeze(t_class, axis=-1)
        # Compute map
        cal_map(p_bbox, p_labels, p_scores, np.zeros((138, 138, len(p_bbox))),
                np.array(t_bbox), np.array(t_class),
                np.zeros((138, 138, len(t_bbox))), ap_data, iou_thresholds)
        print(f"Computing map.....{it}", end="\r")
        it += 1
        #if it > 100:
        #    break

    # Compute the mAp over all thresholds
    calc_map(ap_data, iou_thresholds, class_names, print_result=True)


if __name__ == "__main__":

    # Enable GPU memory growth (only if a GPU is actually present) so
    # TensorFlow does not pre-allocate the whole device
    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    config = TrainingConfig()
    args = training_config_parser().parse_args()
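    # Override the default config with any command-line arguments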
    config.update_from_args(args)

    # Load the model with the new layers to finetune
    detr = build_model(config)

    valid_dt = load_coco("val", 1, config, augmentation=None)

    # Run the evaluation
    eval_model(detr, config, CLASS_NAME, valid_dt)
Example #3
def train(config,
          evaluate_only=False,
          outdir=".",
          detail=False,
          azureml=False):

    filename = config.model.filename
    categories_file = config.dataset.categories
    wav_directory = config.dataset.path
    batch_size = config.training.batch_size
    hidden_units = config.model.hidden_units
    architecture = config.model.architecture
    num_layers = config.model.num_layers
    use_gpu = config.training.use_gpu

    run = None

    if azureml:
        from azureml.core.run import Run
        run = Run.get_context()
        if run is None:
            print("### Run.get_context() returned None")
        else:
            print("### Running in Azure Context")

    valid_layers = [1, 2, 3]
    if num_layers not in valid_layers:
        raise Exception(
            "--num_layers can only be one of these values {}".format(
                valid_layers))

    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    if not filename:
        filename = "{}{}KeywordSpotter.pt".format(architecture, hidden_units)
        config.model.filename = filename

    # load the featurized data
    if not os.path.isdir(wav_directory):
        print("### Error: please specify valid --dataset folder location: {}".
              format(wav_directory))
        sys.exit(1)

    if not categories_file:
        categories_file = os.path.join(wav_directory, "categories.txt")

    with open(categories_file, "r") as f:
        keywords = [x.strip() for x in f.readlines()]

    training_file = os.path.join(wav_directory, "training_list.npz")
    testing_file = os.path.join(wav_directory, "testing_list.npz")
    validation_file = os.path.join(wav_directory, "validation_list.npz")

    if not os.path.isfile(training_file):
        print("Missing file {}".format(training_file))
        print("Please run make_datasets.py")
        sys.exit(1)
    if not os.path.isfile(validation_file):
        print("Missing file {}".format(validation_file))
        print("Please run make_datasets.py")
        sys.exit(1)
    if not os.path.isfile(testing_file):
        print("Missing file {}".format(testing_file))
        print("Please run make_datasets.py")
        sys.exit(1)

    model = None

    device = torch.device("cpu")
    if use_gpu:
        if torch.cuda.is_available():
            device = torch.device("cuda")
        else:
            print("### CUDA not available!!")

    print("Loading {}...".format(testing_file))
    test_data = AudioDataset(testing_file, config.dataset, keywords)

    log = None
    if not evaluate_only:
        print("Loading {}...".format(training_file))
        training_data = AudioDataset(training_file, config.dataset, keywords)

        print("Loading {}...".format(validation_file))
        validation_data = AudioDataset(validation_file, config.dataset,
                                       keywords)

        if training_data.mean is not None:
            fname = os.path.join(outdir, "mean.npy")
            print("Saving {}".format(fname))
            np.save(fname, training_data.mean)
            fname = os.path.join(outdir, "std.npy")
            print("Saving {}".format(fname))
            np.save(fname, training_data.std)

            # use the training_data mean and std variation
            test_data.mean = training_data.mean
            test_data.std = training_data.std
            validation_data.mean = training_data.mean
            validation_data.std = training_data.std

        print("Training model {}".format(filename))
        model = create_model(config.model, training_data.input_size,
                             training_data.num_keywords)
        if device.type == 'cuda':
            model.cuda()  # move the processing to GPU

        start = time.time()
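        # Train the model; the elapsed wall-clock time feeds the results log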
        log = model.fit(training_data, validation_data, config.training,
                        config.model, device, detail, run)
        end = time.time()

        passed, total, rate = model.evaluate(training_data, batch_size, device)
        print("Training accuracy = {:.3f} %".format(rate * 100))

        torch.save(model.state_dict(), os.path.join(outdir, filename))

    print(
        "Evaluating {} keyword spotter using {} rows of featurized test audio..."
        .format(architecture, test_data.num_rows))
    if model is None:
        msg = "Loading trained model with input size {}, hidden units {} and num keywords {}"
        print(
            msg.format(test_data.input_size, hidden_units,
                       test_data.num_keywords))
        model = create_model(config.model, test_data.input_size,
                             test_data.num_keywords)
        model.load_dict(torch.load(filename))
        if model and device.type == 'cuda':
            model.cuda()  # move the processing to GPU

    results_file = os.path.join(outdir, "results.txt")
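    # Evaluate on the held-out test set, recording the output in results_file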
    passed, total, rate = model.evaluate(test_data, batch_size, device,
                                         results_file)
    print("Testing accuracy = {:.3f} %".format(rate * 100))

    if not evaluate_only:
        name = os.path.splitext(filename)[0] + ".onnx"
        print("saving onnx file: {}".format(name))
        model.export(os.path.join(outdir, name), device)

        config.dataset.sample_rate = test_data.sample_rate
        config.dataset.input_size = test_data.audio_size
        config.dataset.num_filters = test_data.input_size
        config.dataset.window_size = test_data.window_size
        config.dataset.shift = test_data.shift

        logdata = {
            "accuracy_val": rate,
            "training_time": end - start,
            "log": log
        }
        d = TrainingConfig.to_dict(config)
        logdata.update(d)

        logname = os.path.join(outdir, "train_results.json")
        save_json(logdata, logname)

    return rate, log
Example #4
        logname = os.path.join(outdir, "train_results.json")
        save_json(logdata, logname)

    return rate, log


def str2bool(v):
    if v is None:
        return False
    lower = v.lower()
    return lower in ["t", "1", "true", "yes"]
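
A usage note (illustration, not from the excerpt): str2bool is shaped like an argparse type converter for boolean-style flags; it maps "t", "1", "true", and "yes" (case-insensitively) to True and anything else, including None, to False.

parser.add_argument("--use_gpu", type=str2bool, help="Use GPU if available (t/1/true/yes)")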


if __name__ == '__main__':
    config = TrainingConfig()
    parser = argparse.ArgumentParser(
        "train a GRU based neural network for keyword spotting")

    # all the training parameters
    parser.add_argument("--epochs", help="Number of epochs to train", type=int)
    parser.add_argument(
        "--lr_scheduler",
        help=
        "Type of learning rate scheduler (None, TriangleLR, CosineAnnealingLR,"
        " ExponentialLR, ExponentialResettingLR)")
    parser.add_argument(
        "--learning_rate",
        help="Default learning rate, and maximum for schedulers",
        type=float)
    parser.add_argument("--lr_min",
Example #5
    d = TrainingConfig.to_dict(config)
    logdata.update(d)

    logname = os.path.join(os.path.dirname(filename), "train_results.json")
    save_json(logdata, logname)

    return rate, log


def str2bool(s):
    s = s.lower()
    return s in ["t", "true", "yes", "1"]


if __name__ == '__main__':
    config = TrainingConfig()
    parser = argparse.ArgumentParser(
        "train a GRU based neural network for keyword spotting")

    # all the training parameters
    parser.add_argument("--epochs", help="Number of epochs to train", type=int)
    parser.add_argument(
        "--lr_scheduler",
        help=
        "Type of learning rate scheduler (None, TriangleLR, CosineAnnealingLR,"
        " ExponentialLR, ExponentialResettingLR)")
    parser.add_argument(
        "--learning_rate",
        help="Default learning rate, and maximum for schedulers",
        type=float)
    parser.add_argument("--lr_min",
Example #6
    parser.add_argument("--mean_path", help="Path to train dataset mean", type=str)
    parser.add_argument("--std_path", help="Path to train dataset std", type=str)

    args = parser.parse_args()

    # FastGRNN Parameters
    config_path = args.config_path
    fastgrnn_model_path = args.model_path
    fastgrnn_mean_path = args.mean_path
    fastgrnn_std_path = args.std_path
    
    mean = np.load(fastgrnn_mean_path)
    std = np.load(fastgrnn_std_path)

    # Load FastGRNN
    config = TrainingConfig()
    config.load(config_path)
    fastgrnn = create_model(config.model, num_filt, 35)
    fastgrnn.load_state_dict(torch.load(fastgrnn_model_path, map_location=torch.device('cpu')))
    fastgrnn.normalize(None, None)
    
    # Start streaming prediction
    pred = PredictionThread()
    rec = RecordingThread()
    
    pred.start()
    rec.start()

    pred.join()
    rec.join()
Example #7
        frame = frame / 255
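        # Draw the predicted boxes, labels, and scores onto the normalized frame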
        frame = numpy_bbox_to_image(frame,
                                    predicted_bbox,
                                    labels=predicted_labels,
                                    scores=predicted_scores,
                                    class_name=CLASS_NAME)

        cv2.imshow('frame', frame)
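        # Quit the display loop when 'q' is pressed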
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":

    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    config = TrainingConfig()
    args = training_config_parser().parse_args()
    config.update_from_args(args)

    # Load the model with the new layers to finetune
    detr = get_detr_model(config, include_top=True, weights="detr")
    config.background_class = 91

    # Run webcam inference
    run_webcam_inference(detr)