Example #1
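All examples below are main() functions excerpted from a family of Keras scripts for recognizing Go boards and stones. They depend on module-level context the excerpts omit. A minimal sketch of that assumed preamble follows; every name and value here is inferred from usage and may differ in the actual scripts:

import os
import sys
import argparse
import numpy as np
import coremltools
import keras.models as km
from pdb import set_trace as BP   # BP() drops into the debugger
import ut                         # project utility module (assumed name)

SCRIPTPATH = os.path.dirname(os.path.abspath(__file__))
BATCH_SIZE = 32                   # assumed value
WEIGHTSFILE = 'weights.h5'        # assumed filename
MODELFILE = 'model.h5'            # assumed filename
RESOLUTION, GRIDSIZE = 80, 5      # assumed defaults; some examples recompute them

The model classes (SimpleModel, BEWModel, MapModel, StoneModel, GCountModel, LambdaModel, GoogleModel) and helpers such as usage(), visualize(), and visualize_channels() are defined elsewhere in the respective scripts; usage(True) is assumed to print help and exit, while usage() returns the usage string.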
def main():
    if len(sys.argv) == 1:
        usage(True)

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--resolution", required=True, type=int)
    parser.add_argument("--epochs", required=True, type=int)
    args = parser.parse_args()
    model = SimpleModel(args.resolution)
    images = ut.get_data(SCRIPTPATH, (args.resolution, args.resolution))
    meta = ut.get_meta(SCRIPTPATH)
    # Normalize training and validation data by train data mean and std
    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)
    model.model.fit(images['train_data'],
                    meta['train_classes'],
                    batch_size=BATCH_SIZE,
                    epochs=args.epochs,
                    validation_data=(images['valid_data'],
                                     meta['valid_classes']))
    preds = model.model.predict(images['valid_data'], batch_size=BATCH_SIZE)
    print(preds)
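The raw predict() output is a matrix of per-class scores, one row per validation image. To turn it into class indices, one would typically take the per-row argmax (an illustration, not part of the original script):

pred_classes = np.argmax(preds, axis=1)   # highest-scoring class per sample
print(pred_classes)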
Example #2
def main():
    if len(sys.argv) == 1:
        usage(True)

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--resolution", required=True, type=int)
    parser.add_argument("--epochs", required=True, type=int)
    parser.add_argument("--rate", required=True, type=float)
    args = parser.parse_args()
    model = BEWModel(args.resolution, args.rate)
    images = ut.get_data(SCRIPTPATH, (args.resolution, args.resolution),
                         color_mode='rgb')
    meta = get_meta_from_fnames(SCRIPTPATH)
    # Normalize with a simple fixed scheme; the per-channel mean/std
    # normalization used in the other examples is disabled in this variant.
    ut.dumb_normalize(images['train_data'])
    ut.dumb_normalize(images['valid_data'])
    model.model.fit(images['train_data'],
                    meta['train_classes_hot'],
                    batch_size=BATCH_SIZE,
                    epochs=args.epochs,
                    validation_data=(images['valid_data'],
                                     meta['valid_classes_hot']))
    model.model.save('nn_bew.hd5')

    coreml_model = coremltools.converters.keras.convert(
        model.model,
        input_names=['image'],
        image_input_names='image',
        class_labels=['b', 'e', 'w'],
        predicted_feature_name='bew',
        image_scale=1 / 128.0,
        red_bias=-1,
        green_bias=-1,
        blue_bias=-1)

    coreml_model.author = 'joe'
    coreml_model.license = 'MIT'
    coreml_model.short_description = 'Classify go stones and intersections'
    coreml_model.input_description['image'] = 'A 23x23 pixel Image'
    coreml_model.output_description[
        'output1'] = 'A one-hot vector for classes black empty white'

    coreml_model.save("nn_bew.mlmodel")
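A note on the conversion parameters: the Keras converter applies image_scale first and the per-channel biases afterwards, so each pixel value v is mapped to v / 128.0 - 1, landing roughly in [-1, 1). For predictions to be consistent, dumb_normalize above would have to apply the same transform at training time, which the excerpt suggests but does not show. A quick check of the mapping:

for v in (0, 128, 255):
    print(v, v / 128.0 - 1)   # -1.0, 0.0, 0.9921875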
Example #3
def main():
    if len(sys.argv) == 1:
        usage(True)

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    model = MapModel(RESOLUTION, GRIDSIZE, args.rate)
    if args.visualize:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE)
            if args.rate:
                model.model.optimizer.lr.set_value(args.rate)

    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    meta = ut.get_meta(SCRIPTPATH)
    output_class = ut.get_output_by_key(SCRIPTPATH, 'class')
    output_xyr = ut.get_output_by_key(SCRIPTPATH, 'xyr')
    # Normalize training and validation data by train data mean and std
    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    train_output_class = ut.onehot(output_class['train_output'])
    train_output_xyr = output_xyr['train_output']
    valid_output_class = ut.onehot(output_class['valid_output'])
    valid_output_xyr = output_xyr['valid_output']
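    # ut.onehot above is assumed to map integer class labels to one-hot
    # vectors; the xyr targets stay as raw coordinates for the regression head.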

    if args.visualize:
        print('Dumping conv layer images to jpg')
        visualize(model, 'classconv', images['train_data'],
                  ['train/' + x for x in meta['train_filenames']])
        exit(0)

    # Train
    print('Start training...')
    model.train(images['train_data'], [train_output_class, train_output_xyr],
                images['valid_data'], [valid_output_class, valid_output_xyr],
                BATCH_SIZE, args.epochs)
    model.model.save_weights(WEIGHTSFILE)
    model.model.save(MODELFILE)
    model.print_results(images['valid_data'],
                        [valid_output_class, valid_output_xyr])
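Several of these examples normalize both splits with statistics computed on the training split only, which keeps validation data out of the preprocessing. A minimal sketch of what ut.get_means_and_stds and ut.normalize are assumed to do, inferred from how they are called (per-channel, in-place, channels-last arrays):

def get_means_and_stds(data):
    # Per-channel mean and std across all samples, rows, and columns
    return data.mean(axis=(0, 1, 2)), data.std(axis=(0, 1, 2))

def normalize(data, means, stds):
    # Standardize each channel in place
    for c in range(data.shape[-1]):
        data[..., c] = (data[..., c] - means[c]) / stds[c]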
Example #4
def main():
    if len(sys.argv) == 1:
        usage(True)

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--epochs", required=True, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    args = parser.parse_args()
    model = MapModel(RESOLUTION, GRIDSIZE, args.rate)
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output_class = ut.get_output_by_key(SCRIPTPATH, 'class')
    output_xyr = ut.get_output_by_key(SCRIPTPATH, 'xyr')
    # Normalize training and validation data by train data mean and std
    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    train_output_class = ut.onehot(output_class['train_output'])
    train_output_xyr = output_xyr['train_output']
    valid_output_class = ut.onehot(output_class['valid_output'])
    valid_output_xyr = output_xyr['valid_output']

    # Load previously saved weights, if any, then train
    if os.path.exists('model.h5'): model.model.load_weights('model.h5')
    print("fitting model...")
    model.model.fit(images['train_data'],
                    [train_output_class, train_output_xyr],
                    batch_size=BATCH_SIZE,
                    epochs=args.epochs,
                    validation_data=(images['valid_data'],
                                     [valid_output_class, valid_output_xyr]))
    model.model.save_weights('model.h5')
    preds = model.model.predict(images['valid_data'], batch_size=BATCH_SIZE)
    classpreds = preds[0]
    pospreds = preds[1]
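    # The model has two output heads, so predict() returns a list:
    # preds[0] holds the class scores, preds[1] the xyr position estimates.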
    for i, cp in enumerate(classpreds):
        pp = pospreds[i]
        tstr = ('class: %s pred: %s center: %.1f %.1f pred: %.1f %.1f' %
                ('b' if output_class['valid_output'][i] else 'w',
                 'b' if cp[1] > cp[0] else 'w',
                 valid_output_xyr[i][0], valid_output_xyr[i][1],
                 pp[0], pp[1]))
        print(tstr)
Example #5
def main():
    if len(sys.argv) == 1:
        usage(True)

    parser = argparse.ArgumentParser(usage=usage())
    #parser.add_argument("--resolution", required=True, type=int)
    #parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=True, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    args = parser.parse_args()
    model = StoneModel(RESOLUTION, GRIDSIZE, args.rate)
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    meta = ut.get_output_by_key(SCRIPTPATH, 'stones')
    # Normalize training and validation data by train data mean and std
    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    train_output = meta['train_output']  # length-25 lists of ones and zeros
    for li in train_output:
        li += [1 - x for x in li]
    valid_output = meta['valid_output']  # length-25 lists of ones and zeros
    for li in valid_output:
        li += [1 - x for x in li]
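    # Each length-25 target now reads [p_0..p_24, 1-p_0..1-p_24]; presumably
    # the network has one output per "stone here" and one per "no stone here".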

    # Load previously saved weights, if any, then train
    if os.path.exists('model.h5'): model.model.load_weights('model.h5')
    model.model.fit(images['train_data'], train_output,
                    batch_size=BATCH_SIZE, epochs=args.epochs,
                    validation_data=(images['valid_data'], valid_output))
    model.model.save_weights('model.h5')
    preds = model.model.predict(images['valid_data'], batch_size=BATCH_SIZE)
    print(preds)
Example #6
def main():
    if len(sys.argv) == 1:
        usage(True)

    global GRIDSIZE, RESOLUTION

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    GRIDSIZE = args.gridsize
    RESOLUTION = GRIDSIZE * 2 * 2 * 2 * 2
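    # Input resolution is tied to grid size; the factor 2*2*2*2 presumably
    # mirrors four 2x downsampling stages inside GCountModel.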
    model = GCountModel(RESOLUTION, GRIDSIZE, BATCH_SIZE, args.rate)
    if args.visualize or not args.epochs:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE, custom_objects={"th": th})
            if args.rate:
                model.model.optimizer.lr.set_value(args.rate)

    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output = ut.get_output_by_key(SCRIPTPATH, 'stones')

    #-----------------------------------------------------------
    # Build count targets: one [n_empty, n_white, n_black] triple per board
    tt = output['valid_output']
    valid_output = np.array([[
        x.tolist().count(EMPTY),
        x.tolist().count(WHITE),
        x.tolist().count(BLACK)
    ] for x in tt])
    tt = output['train_output']
    train_output = np.array([[
        x.tolist().count(EMPTY),
        x.tolist().count(WHITE),
        x.tolist().count(BLACK)
    ] for x in tt])

    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    # Visualization
    #-----------------
    if args.visualize:
        print('Dumping conv layer images to jpg')
        for i, idx in enumerate((700, 500, 400, 300, 200)):
            visualize_channels(model.model, 'lastconv', range(0, 3),
                               images['train_data'][idx:idx + 1],
                               'lastconv%d.jpg' % i)
        exit(0)

    # If no epochs, just print output and what it should have been
    if not args.epochs:
        idx = 0
        for lname in ('lastconv', 'count_e', 'count_w', 'count_b'):
            print(lname)
            print(ut.get_output_of_layer(model.model, lname,
                                         images['train_data'][idx:idx + 1]))
        print('out')
        print(model.model.predict(images['train_data'][idx:idx + 1],
                                  batch_size=1))
        print('target')
        print(train_output[idx:idx + 1])
        BP()

    # Train
    if args.epochs:
        print('Start training...')
        model.train(images['train_data'], train_output, images['valid_data'],
                    valid_output, BATCH_SIZE, args.epochs)
        model.model.save_weights(WEIGHTSFILE)
        model.model.save(MODELFILE)
Example #7
def main():
    if len(sys.argv) == 1:
        usage(True)

    global GRIDSIZE, RESOLUTION

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    GRIDSIZE = args.gridsize
    RESOLUTION = GRIDSIZE * 2 * 2 * 2
    model = LambdaModel(RESOLUTION, GRIDSIZE, args.rate)
    if args.visualize or not args.epochs:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE)
            if args.rate:
                model.model.optimizer.lr.set_value(args.rate)

    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output = ut.get_output_by_key(SCRIPTPATH, 'stones')

    #-----------------------------------------------------------
    # Reshape targets to look like the flattened network output
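    # ut.onehot(x, NCOLORS) is assumed to return shape (gridpoints, NCOLORS);
    # transposing and flattening yields class-major order (all class-0 cells,
    # then class-1, then class-2), matching the flattened network output.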
    tt = output['valid_output']
    valid_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])
    tt = output['train_output']
    train_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])

    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    if not args.epochs:
        idx = 0
        xx = get_output_of_layer(model.model, 'out',
                                 images['train_data'][idx:idx + 1])
        print(xx)
        print(train_output[idx:idx + 1])
        BP()

    if args.visualize:
        print('Dumping conv layer images to jpg')
        visualize(model, 'classconv', images['train_data'],
                  ['train/' + x for x in output['train_filenames']])
        exit(0)

    # Train
    if args.epochs:
        print('Start training...')
        model.train(images['train_data'], train_output, images['valid_data'],
                    valid_output, BATCH_SIZE, args.epochs)
        model.model.save_weights(WEIGHTSFILE)
        model.model.save(MODELFILE)
Example #8
def main():
    if len(sys.argv) == 1:
        usage(True)

    global GRIDSIZE, RESOLUTION

    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--gridsize", required=True, type=int)
    parser.add_argument("--epochs", required=False, default=10, type=int)
    parser.add_argument("--rate", required=False, default=0, type=float)
    parser.add_argument("--visualize", required=False, action='store_true')
    args = parser.parse_args()
    GRIDSIZE = args.gridsize
    RESOLUTION = GRIDSIZE * 2 * 2 * 2 * 2
    model = GoogleModel(RESOLUTION, GRIDSIZE, args.rate)
    if args.visualize or not args.epochs:
        if os.path.exists(WEIGHTSFILE):
            print('Loading weights from file %s...' % WEIGHTSFILE)
            model.model.load_weights(WEIGHTSFILE)
    else:
        if os.path.exists(MODELFILE):
            print('Loading model from file %s...' % MODELFILE)
            model.model = km.load_model(MODELFILE)
            if args.rate:
                model.model.optimizer.lr.set_value(args.rate)

    print('Reading data...')
    images = ut.get_data(SCRIPTPATH, (RESOLUTION, RESOLUTION))
    output = ut.get_output_by_key(SCRIPTPATH, 'stones')

    #-----------------------------------------------------------
    # Reshape targets to look like the flattened network output
    tt = output['valid_output']
    valid_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])
    tt = output['train_output']
    train_output = np.array([
        np.transpose(ut.onehot(x, NCOLORS)).reshape(GRIDSIZE * GRIDSIZE * 3)
        for x in tt
    ])

    means, stds = ut.get_means_and_stds(images['train_data'])
    ut.normalize(images['train_data'], means, stds)
    ut.normalize(images['valid_data'], means, stds)

    # Visualization
    #-----------------
    if args.visualize:
        print('Dumping conv layer images to jpg')
        visualize_channels(model.model, 'lastconv', range(0, 3),
                           images['valid_data'][42:43], 'lastconv.jpg')
        exit(0)

    # If no epochs, just print output and what it should have been
    if not args.epochs:
        idx = 0
        xx = ut.get_output_of_layer(model.model, 'out',
                                    images['train_data'][idx:idx + 1])
        print(xx)
        print(train_output[idx:idx + 1])
        BP()

    # Train
    if args.epochs:
        print('Start training...')
        model.train(images['train_data'], train_output, images['valid_data'],
                    valid_output, BATCH_SIZE, args.epochs)
        model.model.save_weights(WEIGHTSFILE)
        model.model.save(MODELFILE)