Example #1
def main():
    if len(sys.argv) == 1:
        usage(True)

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    model = MapModel(RESOLUTION, GRIDSIZE, args.rate)
    if args.visualize:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE)
            if args.rate:
                model.model.optimizer.lr.set_value(args.rate)

    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    meta = ut.get_meta(SCRIPTPATH)
    output_class = ut.get_output_by_key(SCRIPTPATH, 'class')
    output_xyr = ut.get_output_by_key(SCRIPTPATH, 'xyr')
    # Normalize training and validation data by train data mean and std
    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    train_output_class = ut.onehot(output_class['train_output'])
    train_output_xyr = output_xyr['train_output']
    valid_output_class = ut.onehot(output_class['valid_output'])
    valid_output_xyr = output_xyr['valid_output']

    if args.visualize:
        print('Dumping conv layer images to jpg')
        visualize(model, 'classconv', images['train_data'],
                  ['train/' + x for x in meta['train_filenames']])
        exit(0)

    # Train
    print('Start training...')
    model.train(images['train_data'], [train_output_class, train_output_xyr],
                images['valid_data'], [valid_output_class, valid_output_xyr],
                BATCH_SIZE, args.epochs)
    model.model.save_weights(WEIGHTSFILE)
    model.model.save(MODELFILE)
    model.print_results(images['valid_data'],
                        [valid_output_class, valid_output_xyr])
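
The ut helpers are never shown in these examples. A minimal sketch of what the normalization pair used above might look like, assuming channels-last float image arrays of shape (N, H, W, C); the real ut module may differ:

import numpy as np

def get_means_and_stds(images):
    # Per-channel mean and std over all images and all pixels
    means = images.mean(axis=(0, 1, 2))
    stds = images.std(axis=(0, 1, 2))
    return means, stds

def normalize(images, means, stds):
    # In-place channel-wise standardization, which is why the examples
    # can reuse the training statistics on the validation data
    images -= means
    images /= stds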
Example #2
def main():
    if len(sys.argv) == 1:
        usage(True)

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--epochs", required=True, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    args = parser.parse_args()
    model = MapModel(RESOLUTION, GRIDSIZE, args.rate)
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output_class = ut.get_output_by_key(SCRIPTPATH, 'class')
    output_xyr = ut.get_output_by_key(SCRIPTPATH, 'xyr')
    # Normalize training and validation data by train data mean and std
    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    train_output_class = ut.onehot(output_class['train_output'])
    train_output_xyr = output_xyr['train_output']
    valid_output_class = ut.onehot(output_class['valid_output'])
    valid_output_xyr = output_xyr['valid_output']

    # Load the model and train
    if os.path.exists('model.h5'):
        model.model.load_weights('model.h5')
    print("fitting model...")
    model.model.fit(images['train_data'],
                    [train_output_class, train_output_xyr],
                    batch_size=BATCH_SIZE,
                    epochs=args.epochs,
                    validation_data=(images['valid_data'],
                                     [valid_output_class, valid_output_xyr]))
    model.model.save_weights('model.h5')
    preds = model.model.predict(images['valid_data'], batch_size=BATCH_SIZE)
    classpreds = preds[0]
    pospreds = preds[1]
    for i, cp in enumerate(classpreds):
        pp = pospreds[i]
        tstr = ('class: %s pred: %s center: %.1f %.1f pred: %.1f %.1f'
                % ('b' if output_class['valid_output'][i] else 'w',
                   'b' if cp[1] > cp[0] else 'w',
                   valid_output_xyr[i][0], valid_output_xyr[i][1],
                   pp[0], pp[1]))
        print(tstr)
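
The readout loop above hard-codes a two-class comparison (cp[1] > cp[0]). A drop-in sketch of the same loop generalized with np.argmax, assuming one score per class and that index 0 means white and index 1 means black (the names are guesses from the 'b'/'w' labels above); it reuses the variables from Example #2:

import numpy as np

CLASS_NAMES = ['w', 'b']  # assumed label order

for i, cp in enumerate(classpreds):
    pp = pospreds[i]
    true_cls = CLASS_NAMES[int(output_class['valid_output'][i])]
    pred_cls = CLASS_NAMES[int(np.argmax(cp))]
    print('class: %s pred: %s center: %.1f %.1f pred: %.1f %.1f'
          % (true_cls, pred_cls,
             valid_output_xyr[i][0], valid_output_xyr[i][1],
             pp[0], pp[1]))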
Example #3
def get_meta_from_fnames(path):
    batches = ut.get_batches(path, shuffle=False, batch_size=1)
    train_batches = batches['train_batches']
    valid_batches = batches['valid_batches']

    train_classes = []
    for idx, fname in enumerate(train_batches.filenames):
        if '/B_' in fname:
            train_classes.append(0)
        elif '/E_' in fname:
            train_classes.append(1)
        elif '/W_' in fname:
            train_classes.append(2)
        else:
            print('ERROR: Bad filename %s' % fname)
            exit(1)
    train_classes_hot = ut.onehot(train_classes)

    valid_classes = []
    for idx, fname in enumerate(valid_batches.filenames):
        if '/B_' in fname:
            valid_classes.append(0)
        elif '/E_' in fname:
            valid_classes.append(1)
        elif '/W_' in fname:
            valid_classes.append(2)
        else:
            print('ERROR: Bad filename %s' % fname)
            exit(1)
    valid_classes_hot = ut.onehot(valid_classes)

    res = {
        'train_classes': train_classes,
        'train_classes_hot': train_classes_hot,
        'train_filenames': train_batches.filenames,
        'valid_classes': valid_classes,
        'valid_classes_hot': valid_classes_hot,
        'valid_filenames': valid_batches.filenames
    }
    return res
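
The train and validation loops in get_meta_from_fnames are identical except for their input. A sketch of a shared helper that removes the duplication, using the same B_/E_/W_ prefix-to-class mapping as the code above:

import sys

PREFIX_TO_CLASS = {'/B_': 0, '/E_': 1, '/W_': 2}

def classes_from_fnames(fnames):
    classes = []
    for fname in fnames:
        for prefix, cls in PREFIX_TO_CLASS.items():
            if prefix in fname:
                classes.append(cls)
                break
        else:  # no prefix matched
            sys.exit('ERROR: Bad filename %s' % fname)
    return classes

Both blocks then collapse to classes_from_fnames(train_batches.filenames) and classes_from_fnames(valid_batches.filenames).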
Example #4
def main():
    if len(sys.argv) == 1:
        usage(True)

    global GRIDSIZE, RESOLUTION

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    GRIDSIZE = args.gridsize
    RESOLUTION = GRIDSIZE * 2 * 2 * 2
    model = LambdaModel(RESOLUTION, GRIDSIZE, args.rate)
    if args.visualize or not args.epochs:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE)
            if args.rate:
                model.model.optimizer.lr.set_value(args.rate)

    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output = ut.get_output_by_key(SCRIPTPATH, 'stones')

    #-----------------------------------------------------------
    # Reshape targets to look like the flattened network output
    tt = output['valid_output']
    valid_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])
    tt = output['train_output']
    train_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])

    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    # If no epochs, just print the network output and what it should have been
    if not args.epochs:
        idx = 0
        xx = get_output_of_layer(model.model, 'out',
                                 images['train_data'][idx:idx + 1])
        print(xx)
        print(train_output[idx:idx + 1])
        BP()  # drop into the debugger to inspect

    if args.visualize:
        print('Dumping conv layer images to jpg')
        visualize(model, 'classconv', images['train_data'],
                  ['train/' + x for x in output['train_filenames']])
        exit(0)

    # Train
    if args.epochs:
        print('Start training...')
        model.train(images['train_data'], train_output, images['valid_data'],
                    valid_output, BATCH_SIZE, args.epochs)
        model.model.save_weights(WEIGHTSFILE)
        model.model.save(MODELFILE)
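
The target-reshaping expression in Examples #4 and #5 is dense. A worked sketch of what it produces, assuming ut.onehot(x, NCOLORS) maps a flat vector of GRIDSIZE * GRIDSIZE color labels to a (GRIDSIZE * GRIDSIZE, NCOLORS) one-hot matrix:

import numpy as np

GRIDSIZE, NCOLORS = 2, 3
x = np.array([0, 2, 1, 0])       # one color label per grid cell

onehot = np.eye(NCOLORS)[x]      # (4, 3): one one-hot row per cell
flat = np.transpose(onehot).reshape(GRIDSIZE * GRIDSIZE * NCOLORS)
# The transpose groups values by color plane: all cells' scores for
# color 0 come first, then color 1, then color 2. That is a
# (NCOLORS, GRIDSIZE, GRIDSIZE) tensor flattened in C order, matching
# the network's flattened 'out' layer.
print(flat)  # color-0 plane [1 0 0 1], color-1 [0 0 1 0], color-2 [0 1 0 0]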
Example #5
def main():
    if len(sys.argv) == 1:
        usage(True)

    global GRIDSIZE, RESOLUTION

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    GRIDSIZE = args.gridsize
    RESOLUTION = GRIDSIZE * 2 * 2 * 2 * 2
    model = GoogleModel(RESOLUTION, GRIDSIZE, args.rate)
    if args.visualize or not args.epochs:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE)
            if args.rate:
                model.model.optimizer.lr.set_value(args.rate)

    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output = ut.get_output_by_key(SCRIPTPATH, 'stones')

    #-----------------------------------------------------------
    # Reshape targets to look like the flattened network output
    tt = output['valid_output']
    valid_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])
    tt = output['train_output']
    train_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])

    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    # Visualization
    #-----------------
    if args.visualize:
        print('Dumping conv layer images to jpg')
        visualize_channels(model.model, 'lastconv', range(0, 3),
                           images['valid_data'][42:43], 'lastconv.jpg')
        exit(0)

    # If no epochs, just print output and what it should have been
    if not args.epochs:
        idx = 0
        xx = ut.get_output_of_layer(model.model, 'out',
                                    images['train_data'][idx:idx + 1])
        print(xx)
        print(train_output[idx:idx + 1])
        BP()  # drop into the debugger to inspect

    # Train
    if args.epochs:
        print('Start training...')
        model.train(images['train_data'], train_output, images['valid_data'],
                    valid_output, BATCH_SIZE, args.epochs)
        model.model.save_weights(WEIGHTSFILE)
        model.model.save(MODELFILE)
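
visualize_channels is not defined anywhere in these examples. A sketch of one way to implement it with a Keras sub-model, assuming channels-last activations and matplotlib; the signature is copied from the call above, everything else is a guess:

import keras.models as km
import matplotlib.pyplot as plt

def visualize_channels(model, layer_name, channels, data, fname):
    # Sub-model that exposes the named layer's activations
    sub = km.Model(inputs=model.input,
                   outputs=model.get_layer(layer_name).output)
    acts = sub.predict(data)  # assumed shape (1, H, W, nchannels)
    channels = list(channels)
    fig, axes = plt.subplots(1, len(channels))
    for ax, ch in zip(axes, channels):
        ax.imshow(acts[0, :, :, ch], cmap='gray')
        ax.set_title('channel %d' % ch)
        ax.axis('off')
    fig.savefig(fname)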
Example #6
def generate_inp_outp(nsamp, maxint):
    # nsamp random integers in [0, maxint] as an (nsamp, 1) column,
    # plus their one-hot encoding as the matching targets
    inp = np.random.randint(0, maxint + 1, nsamp).reshape((nsamp, 1))
    outp = ut.onehot(inp)
    return inp, outp
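
ut.onehot itself appears in every example, sometimes with an explicit class count (NCOLORS) and sometimes without. A minimal sketch of a compatible implementation, assuming non-negative integer labels:

import numpy as np

def onehot(labels, nclasses=None):
    labels = np.asarray(labels, dtype=int).ravel()
    if nclasses is None:
        nclasses = labels.max() + 1  # infer the class count from the data
    return np.eye(nclasses)[labels]  # shape (len(labels), nclasses)

For instance, onehot([0, 2, 1], 3) returns identity rows 0, 2 and 1 in that order.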