Example No. 1
# NOTE: Data and Model are project-local modules; tensorpack provides the rest.
import numpy as np

from tensorpack import OfflinePredictor, PredictConfig, SaverRestore


def debug(args):

    # prepare predictor
    sess_init = SaverRestore(args.model_path)
    model = Model('train')
    predict_config = PredictConfig(
        session_init=sess_init,
        model=model,
        input_names=['imgs', 'gt_heatmaps', 'gt_pafs', 'mask'],
        output_names=['vgg_features', 'HeatMaps', 'PAFs', 'cost'])
    predict_func = OfflinePredictor(predict_config)

    ds = Data('train', False)

    g = ds.get_data()
    sample = next(g)

    import pdb
    pdb.set_trace()  # breakpoint: inspect the raw sample before batching

    sample = [np.expand_dims(e, axis=0) for e in sample]  # add a batch axis of 1 to each component

    vgg_features, heatmap, paf, cost = predict_func(sample)

    pdb.set_trace()  # breakpoint: inspect the network outputs (pdb already imported above)
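
A note on the call pattern above: a tensorpack OfflinePredictor maps the datapoint's components to input_names in order and returns the tensors listed in output_names, in order. A minimal sketch of printing the outputs instead of breaking into pdb (the shape layouts in the comments are assumptions, not taken from the source):

def inspect_outputs(predict_func, sample):
    # sample: list of per-input arrays, each already carrying a batch axis of 1
    vgg_features, heatmap, paf, cost = predict_func(sample)
    print('vgg_features:', vgg_features.shape)
    print('heatmap:', heatmap.shape)  # assumed (1, H', W', num_joints)
    print('paf:', paf.shape)          # assumed (1, H', W', 2 * num_limbs)
    print('cost:', float(cost))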
Example No. 2
def get_data(train_or_test, batch_size):
    is_train = train_or_test == 'train'

    filename_list = cfg.train_list if is_train else cfg.test_list
    ds = Data(filename_list,
              shuffle=is_train,
              flip=is_train,
              random_crop=is_train,
              test_set=not is_train)
    sample_num = ds.size()

    if is_train:
        augmentors = [
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.6, 1.4), clip=False),
                imgaug.Saturation(0.4, rgb=False),
                imgaug.Lighting(
                    0.1,
                    eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                    eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                     [-0.5808, -0.0045, -0.8140],
                                     [-0.5836, -0.6948, 0.4203]],
                                    dtype='float32')[::-1, ::-1])
            ]),
        ]
    else:
        augmentors = []


    # NOTE: the augmentation pipeline above is built but currently disabled:
    # ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, batch_size, remainder=not is_train)
    if is_train:
        ds = PrefetchDataZMQ(ds, min(6, multiprocessing.cpu_count()))
    return ds, sample_num
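
For reference, a minimal consumption sketch for the DataFlow returned above (tensorpack requires reset_state to be called once before iterating any DataFlow; cfg.train_list is assumed to be configured):

ds, sample_num = get_data('train', batch_size=32)
ds.reset_state()              # required once before iteration
for dp in ds.get_data():      # dp is one batch: a list of batched components
    print(len(dp), dp[0].shape)
    break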
Example No. 3
def chronoamp_ECSA(data: Data, mass: float = 0) -> float:
    x = data.get_time()
    y = data.get_current()

    baseline = calc_baseline(x, y, start=50, end=200)

    charge = integrate_peak(x, y, start=0, end=50, baseline=baseline)
    # TODO: integrate with electrode object
    # get factor and mass info from electrode
    factor = 210e-6 * 1e4  # (C / cm^2) * (cm^2 / m^2) = C / m^2
    area = charge / factor  # m^2 of active Pt surface
    if mass:
        area /= mass  # normalized to m^2_Pt / g_Pt
    return area
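
As a units sanity check, a worked example with invented numbers: at the conventional 210 µC/cm² specific charge for hydrogen adsorption on polycrystalline Pt, the factor is 2.1 C/m², so a 4.2 mC peak corresponds to 2×10⁻³ m²; normalizing by 10⁻⁴ g of Pt gives 20 m²/g_Pt:

charge = 4.2e-3           # C, invented for illustration
factor = 210e-6 * 1e4     # 2.1 C / m^2
area = charge / factor    # 2.0e-3 m^2
mass = 1e-4               # g Pt, invented
print(area / mass)        # 20.0 m^2 / g_Pt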
Example No. 4
def get_data(train_or_test):
    isTrain = train_or_test == 'train'
    ds = Data(train_or_test, affine_trans=isTrain, hflip=isTrain, warp=isTrain)
    if isTrain:
        augmentors = [
            imgaug.RandomOrderAug([
                imgaug.Brightness(30, clip=False),
                imgaug.Contrast((0.8, 1.2), clip=False),
                imgaug.Saturation(0.4),
                # eigval/eigvec reversed with [::-1] for RGB -> BGR channel order
                imgaug.Lighting(0.1,
                                eigval=[0.2175, 0.0188, 0.0045][::-1],
                                eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                                 [-0.5808, -0.0045, -0.8140],
                                                 [-0.5836, -0.6948, 0.4203]],
                                                dtype='float32')[::-1, ::-1])
            ]),
        ]
    else:
        augmentors = []
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
    if isTrain:
        ds = PrefetchData(ds, 3, 2)
    return ds
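
For context, the Lighting augmentor implements the PCA color jitter from Krizhevsky et al. (2012). A hedged numpy sketch of the underlying computation, i.e. the per-channel shift added to every pixel (variable names are mine):

import numpy as np

rng = np.random.default_rng(0)
std = 0.1
eigval = np.array([0.2175, 0.0188, 0.0045])      # ImageNet RGB eigenvalues
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                   [-0.5808, -0.0045, -0.8140],
                   [-0.5836, -0.6948, 0.4203]])
alpha = rng.normal(0.0, std, size=3)             # one draw per image
shift = eigvec @ (alpha * eigval)                # per-channel additive shift
# img = img.astype('float32') + shift            # broadcasts over H x W x 3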
Example No. 5
def get_data(train_or_test, multi_scale, batch_size):
    isTrain = train_or_test == 'train'

    filename_list = cfg.train_list if isTrain else cfg.test_list
    ds = Data(filename_list,
              shuffle=isTrain,
              flip=isTrain,
              affine_trans=isTrain,
              use_multi_scale=isTrain and multi_scale,
              period=batch_size * 10)

    if isTrain:
        augmentors = [
            imgaug.RandomOrderAug([
                imgaug.Brightness(30, clip=False),
                imgaug.Contrast((0.8, 1.2), clip=False),
                imgaug.Saturation(0.4),
                imgaug.Lighting(0.1,
                                eigval=[0.2175, 0.0188, 0.0045][::-1],
                                eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                                 [-0.5808, -0.0045, -0.8140],
                                                 [-0.5836, -0.6948, 0.4203]],
                                                dtype='float32')[::-1, ::-1])
            ]),
            imgaug.Clip(),
            imgaug.ToUint8()
        ]
    else:
        augmentors = [imgaug.ToUint8()]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, batch_size, remainder=not isTrain)
    if isTrain and not multi_scale:
        ds = PrefetchDataZMQ(ds, min(6, multiprocessing.cpu_count()))
    return ds
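
The Clip before ToUint8 in this example matters: Brightness and Contrast run with clip=False, so pixel values can leave [0, 255], and a direct uint8 cast would wrap around rather than saturate. A small numpy illustration:

import numpy as np

x = np.array([-3.0, 260.0])
print(x.astype('uint8'))                   # typically [253   4] -- modular wraparound
print(np.clip(x, 0, 255).astype('uint8'))  # [  0 255] -- saturates as intended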
Example No. 6
def get_data(train_or_test, batch_size):
    isTrain = train_or_test == 'train'
    filename_list = cfg.train_list if isTrain else cfg.test_list
    ds = Data(filename_list, shuffle=isTrain, affine_trans=isTrain)
    ds = BatchData(ds, batch_size, remainder=not isTrain)
    if isTrain:
        ds = PrefetchDataZMQ(ds, min(6, multiprocessing.cpu_count()))

    return ds
Example No. 7
def get_data(train_or_test, batch_size):
    is_train = train_or_test == 'train'
    ds = Data(cfg.filename, test_set=not is_train)

    ds = BatchData(ds, batch_size, remainder=not is_train)
    if is_train:
        ds = PrefetchDataZMQ(ds, min(6, multiprocessing.cpu_count()))
    return ds
Example No. 8
def get_data(train_or_test, batch_size):
    is_train = train_or_test == 'train'

    ds = Data(train_or_test)
    sample_num = ds.size()

    if is_train:
        augmentors = [
            imgaug.RandomOrderAug([
                imgaug.Brightness(30, clip=False),
                imgaug.Contrast((0.8, 1.2), clip=False),
                imgaug.Saturation(0.4),
                imgaug.Lighting(0.1,
                                eigval=[0.2175, 0.0188, 0.0045][::-1],
                                eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                                 [-0.5808, -0.0045, -0.8140],
                                                 [-0.5836, -0.6948, 0.4203]],
                                                dtype='float32')[::-1, ::-1])
            ]),
            # imgaug.Clip(),
            imgaug.ToUint8()
        ]
    else:
        augmentors = [imgaug.ToUint8()]
    ds = AugmentImageComponent(ds, augmentors)

    if is_train:
        ds = PrefetchDataZMQ(ds, min(8, multiprocessing.cpu_count()))
    ds = BatchData(ds, batch_size, remainder=not is_train)
    return ds, sample_num
Example No. 9
def get_data(train_or_test, batch_size):
    isTrain = train_or_test == 'train'
    ds = Data(train_or_test, shuffle=isTrain)
    if isTrain:
        augmentors = cfg.augmentors
    else:
        augmentors = []
    ds = AugmentImageComponent(ds, augmentors)
    ds = CTCBatchData(ds, batch_size)
    if isTrain:
        # ds = PrefetchDataZMQ(ds, min(6, multiprocessing.cpu_count()))
        ds = PrefetchDataZMQ(ds, 1)
    return ds
Example No. 10
def get_data(train_or_test):
    isTrain = train_or_test == 'train'

    ds = Data(train_or_test)

    augmentors = []  # no augmentation in either branch of the source
    ds = AugmentImageComponent(ds, augmentors)
    if isTrain:
        ds = PrefetchDataZMQ(ds, min(6, multiprocessing.cpu_count()))
    ds = BatchData(ds, cfg.batch_size, remainder=not isTrain)
    return ds
Example No. 11
def get_data(train_or_test, batch_size):
    is_train = train_or_test == 'train'

    filename_list = cfg.train_list if is_train else cfg.test_list
    ds = Data(filename_list, rotate=False, flip_ver=is_train, flip_horiz=is_train, shuffle=is_train)

    sample_num = ds.size()

    augmentors = [
        # random rotate and flip should be applied to both input and label, thus cannot be added here
        imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
        imgaug.ToUint8()
    ]
    ds = AugmentImageComponent(ds, augmentors)

    if is_train:
        ds = PrefetchDataZMQ(ds, min(8, multiprocessing.cpu_count()))
    ds = BatchData(ds, batch_size, remainder=not is_train)
    return ds, sample_num
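
The comment above explains why the geometric transforms live inside Data: they must hit the image and the label identically. For reference, tensorpack can also express this at the DataFlow level with AugmentImageComponents, which applies one augmentor with shared random parameters to several components; a hedged sketch (indices assumed to be image=0, label=1):

# geom = [imgaug.Flip(horiz=True), imgaug.Flip(vert=True)]
# ds = AugmentImageComponents(ds, geom, index=(0, 1))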
Example No. 12
def main(args):
    # Dataset functions
    vocab = Vocabulary('./data/vocabulary.json', padding=args.padding)
    kb_vocab = Vocabulary('./data/vocabulary.json', padding=4)
    print('Loading datasets.')
    training = Data(args.training_data, vocab, kb_vocab)
    validation = Data(args.validation_data, vocab, kb_vocab)
    training.load()
    validation.load()
    training.transform()
    training.kb_out()
    validation.transform()
    validation.kb_out()
    print('Datasets Loaded.')
    print('Compiling Model.')

    # NOTE: batch_size and device are module-level globals in the source project.
    model = KVMMModel(pad_length=args.padding,
                      embedding_size=args.embedding,
                      vocab_size=vocab.size(),
                      batch_size=batch_size,
                      n_chars=vocab.size(),
                      n_labels=vocab.size(),
                      encoder_units=200,
                      decoder_units=200).to(device)

    print(model)
    model_optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()

    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    print_every = 100
    start = time.time()
    n_iters = 500000

    iter = 0
    while iter < n_iters:
        training_data = training.generator(batch_size)
        input_tensors = training_data[0][0]
        target_tensors = training_data[1]
        kbs = training_data[0][1]
        iter += 1
        loss = train(input_tensors, target_tensors, kbs, model,
                     model_optimizer, criterion, vocab, kb_vocab)
        print_loss_total += loss
        plot_loss_total += loss
        if iter % print_every == 0:
            validation_data = validation.generator(batch_size)
            validation_inputs = validation_data[0][0]
            validation_kbs = validation_data[0][1]
            validation_targets = validation_data[1]
            accuracy = evaluate(model, validation_inputs, validation_targets,
                                validation_kbs)
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f - val_accuracy %f' %
                  (timeSince(start, iter / n_iters), iter,
                   iter / n_iters * 100, print_loss_avg, accuracy))
            torch.save(model.state_dict(), 'model_weights.pytorch')
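
From the indexing above, the generator evidently yields nested tuples of the form ((inputs, kbs), targets); the unpacking could be written directly (a readability sketch, not from the source):

(input_tensors, kbs), target_tensors = training.generator(batch_size)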
Example No. 13
        # (truncated in the source: this expression is the tail of a sequence-level
        # accuracy metric -- a sample counts as correct only if every timestep matches)
        K.all(K.equal(K.max(y_true, axis=-1),
                      K.cast(K.argmax(y_pred, axis=-1), K.floatx())),
              axis=1))


# Configuration
training_data = './training.csv'
validation_data = './validation.csv'

# Dataset functions
input_vocab = Vocabulary('./human_vocab.json', padding=config.padding)
output_vocab = Vocabulary('./machine_vocab.json', padding=config.padding)

print('Loading datasets...')

training = Data(training_data, input_vocab, output_vocab)
validation = Data(validation_data, input_vocab, output_vocab)
training.load()
validation.load()
training.transform()
validation.transform()

print('Datasets Loaded.')


def build_models(pad_length=config.padding,
                 n_chars=input_vocab.size(),
                 n_labels=output_vocab.size(),
                 embedding_learnable=False,
                 encoder_units=32,
                 decoder_units=32,
                 # (remaining parameters and function body truncated in the source)
Example No. 14
def main(args):
    # Dataset functions
    vocab = Vocabulary(args.vocabulary_data, padding=args.padding)
    kb_vocab = Vocabulary(args.vocabulary_data, padding=4)  # 7
    print('Loading datasets.')
    # Callback.__init__(self)
    if "schedule" in args.training_data:
        train_file_name = "schedule"
    elif "navigate" in args.training_data:
        train_file_name = "navigate"
    elif "weather" in args.training_data:
        train_file_name = "weather"
    elif "ubuntu" in args.training_data:
        train_file_name = "ubuntu"
    elif "original" in args.training_data:
        train_file_name = "original"
    else:
        train_file_name = "unknown"
    if args.save_path == "default":
        args.save_path = "weights/model_weights_" + train_file_name
    training = Data(args.training_data, vocab, kb_vocab,
                    args.generated_training_data)
    validation = Data(args.validation_data, vocab, kb_vocab)
    training.load()
    validation.load()
    training.transform()
    training.kb_out()
    validation.transform()
    validation.kb_out()
    print('Datasets Loaded.')
    print('Compiling Model.')

    # NOTE: batch_size and device are module-level globals in the source project.
    model = KVMMModel(pad_length=args.padding,
                      embedding_size=args.embedding,
                      vocab_size=vocab.size(),
                      batch_size=batch_size,
                      n_chars=vocab.size(),
                      n_labels=vocab.size(),
                      encoder_units=200,
                      decoder_units=200).to(device)

    print(model)
    # Training using Adam Optimizer
    model_optimizer = optim.Adam(model.parameters(), lr=0.001)
    # Training using cross-entropy loss
    criterion = nn.CrossEntropyLoss()

    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    print_every = 100
    save_every = 10000
    start = time.time()
    n_iters = 500000

    iter = 0
    while iter < n_iters:
        training_data = training.generator(batch_size)
        input_tensors = training_data[0][0]
        target_tensors = training_data[1]
        kbs = training_data[0][1]
        iter += 1
        loss = train(input_tensors, target_tensors, kbs, model,
                     model_optimizer, criterion, vocab, kb_vocab)
        print_loss_total += loss
        plot_loss_total += loss
        if iter % print_every == 0:
            validation_data = validation.generator(batch_size)
            validation_inputs = validation_data[0][0]
            validation_kbs = validation_data[0][1]
            validation_targets = validation_data[1]
            accuracy = evaluate(model, validation_inputs, validation_targets,
                                validation_kbs)
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f - val_accuracy %f' %
                  (timeSince(start, iter / n_iters), iter,
                   iter / n_iters * 100, print_loss_avg, accuracy))
            # save_every (10000) is a multiple of print_every (100), so
            # checkpoints are written every 10000 iterations despite the nesting
            if iter % save_every == 0:
                torch.save(model.state_dict(),
                           args.save_path + "_iter_" + str(iter) + ".pytorch")
Example No. 15
def main(args):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Dataset functions
    vocab = Vocabulary('./data/vocabulary.json', padding=args.padding)
    kb_vocab = Vocabulary('./data/vocabulary.json', padding=4)
    print('Loading datasets.')
    training = Data(args.training_data, vocab, kb_vocab)
    validation = Data(args.validation_data, vocab, kb_vocab)
    training.load()
    validation.load()
    training.transform()
    training.kb_out()
    validation.transform()
    validation.kb_out()
    print('Datasets Loaded.')
    print('Compiling Model.')

    model = memnn(pad_length=args.padding,
                  embedding_size=args.embedding,
                  vocab_size=vocab.size(),
                  batch_size=args.batch_size,
                  n_chars=vocab.size(),
                  n_labels=vocab.size(),
                  embedding_learnable=True,
                  encoder_units=200,
                  decoder_units=200,
                  trainable=True)

    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=[
                      'accuracy',
                  ])
    print('Model Compiled.')
    print('Training. Ctrl+C to end early.')

    try:
        model.fit_generator(generator=training.generator(args.batch_size),
                            steps_per_epoch=300,
                            validation_data=validation.generator(
                                args.batch_size),
                            validation_steps=10,
                            workers=1,
                            verbose=1,
                            epochs=args.epochs)

    except KeyboardInterrupt:
        print('Model training stopped early.')
    model.save_weights("model_weights_nkbb.hdf5")

    print('Model training complete.')
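
One portability note on this last example: fit_generator is deprecated in TF2-era Keras, where model.fit accepts generators directly. A hedged equivalent call (assuming TF >= 2.1):

# model.fit(training.generator(args.batch_size),
#           steps_per_epoch=300,
#           validation_data=validation.generator(args.batch_size),
#           validation_steps=10,
#           workers=1,
#           verbose=1,
#           epochs=args.epochs)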