Example #1
def main():
    tf.random.set_seed(42)

    print('[INFO] loading data...')
    (train_x, train_y), (val_x, val_y) = datasets.cifar10.load_data()
    train_x, val_x = normalize(train_x, val_x)
    train_loader = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    train_loader = train_loader.map(preprocess).shuffle(50000).batch(
        BATCH_SIZE)
    val_loader = tf.data.Dataset.from_tensor_slices((val_x, val_y))
    val_loader = val_loader.map(preprocess).batch(BATCH_SIZE)

    model = VGG16([32, 32, 3])

    criterion = K.losses.CategoricalCrossentropy(from_logits=True)
    criterion_save = K.metrics.CategoricalCrossentropy(from_logits=True)
    metric = K.metrics.CategoricalAccuracy()
    opt = optimizers.Adam(learning_rate=LR)

    for e in range(EPOCHS):
        for batch_x, batch_y in train_loader:
            # [b, 1] => [b]
            batch_y = tf.squeeze(batch_y, axis=1)
            # [b, 10]
            batch_y = tf.one_hot(batch_y, depth=10)

            with tf.GradientTape() as tape:
                logits = model(batch_x)
                loss = criterion(batch_y, logits)
                criterion_save.update_state(batch_y, logits)
                metric.update_state(batch_y, logits)

            grads = tape.gradient(loss, model.trainable_variables)
            # MUST clip gradients here or training will diverge
            grads = [tf.clip_by_norm(g, 15) for g in grads]
            opt.apply_gradients(zip(grads, model.trainable_variables))

        train_loss, acc = criterion_save.result().numpy(), metric.result().numpy()

        criterion_save.reset_states()
        metric.reset_states()

        for batch_x, batch_y in val_loader:
            # [b, 1] => [b]
            batch_y = tf.squeeze(batch_y, axis=1)
            # [b, 10]
            batch_y = tf.one_hot(batch_y, depth=10)

            logits = model.predict(batch_x)

            criterion_save.update_state(batch_y, logits)
            metric.update_state(batch_y, logits)

        print(
            f'[INFO] Epoch {e+1}/{EPOCHS} - loss: {train_loss:.4f} - accuracy: {acc:.4f} - val loss: {criterion_save.result().numpy():.4f} - val acc: {metric.result().numpy():.4f}'
        )

        criterion_save.reset_states()
        metric.reset_states()
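
The loop above relies on two helpers, normalize and preprocess, that are not shown. A minimal sketch of what they typically look like for CIFAR-10, matching the statistics computed explicitly in Example #10 below (the exact implementations are an assumption, not part of the original):

import numpy as np
import tensorflow as tf

def normalize(train_x, val_x):
    # standardize both splits with mean/std computed on the training images only
    mean = np.mean(train_x, axis=(0, 1, 2, 3))
    std = np.std(train_x, axis=(0, 1, 2, 3))
    train_x = (train_x - mean) / (std + 1e-7)
    val_x = (val_x - mean) / (std + 1e-7)
    return train_x, val_x

def preprocess(x, y):
    # cast images to float32 and labels to int32 for the tf.data pipeline
    return tf.cast(x, tf.float32), tf.cast(y, tf.int32)
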
Example #2
def main():

    tf.random.set_seed(22)

    print('loading data...')

    (x, y), (x_test, y_test) = datasets.cifar10.load_data()
    x, x_test = normalize(x, x_test)

    print(x.shape, y.shape, x_test.shape, y_test.shape)

    train_loader = tf.data.Dataset.from_tensor_slices((x, y))
    train_loader = train_loader.map(prepare_cifar).shuffle(50000).batch(256)

    test_loader = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    test_loader = test_loader.map(prepare_cifar).shuffle(10000).batch(256)
    print('done.')

    model = VGG16([32, 32, 3])

    model.compile(optimizer='Adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.build(input_shape=(None, 32, 32, 3))

    print("Number of variables in the model : ", len(model.variables))

    model.summary()
    model.fit(train_loader, epochs=40, validation_data=test_loader, verbose=1)
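
Note that Example #2 compiles with the string loss 'categorical_crossentropy', which expects the model to output probabilities; if this VGG16 returns raw logits (as the custom loops in the other examples assume with from_logits=True), a logits-aware loss should be passed instead. A minimal sketch, assuming the same logit-producing model and that prepare_cifar one-hot encodes the labels:

from tensorflow import keras

model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-4),
              loss=keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=[keras.metrics.CategoricalAccuracy()])

If prepare_cifar instead leaves the labels as integer class ids, the sparse variants (SparseCategoricalCrossentropy / SparseCategoricalAccuracy) would be needed.
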
Example #3
def main(_):
    parser = argparse.ArgumentParser()
    parser.add_argument('--action', dest='action', type=str, default='train',
                        help='actions: train, test')
    parser.add_argument('--model_id', type=str)
    parser.add_argument('--test_step', type=int, default=1000)
    args = parser.parse_args()
    if args.action not in ['train', 'test']:
        print('invalid action: ', args.action)
        print("Please input a action: train, test")
    else:
        model = VGG16(configure(args))
        getattr(model, args.action)()
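
A hypothetical invocation of this script (the file name and the model_id value are assumptions, not from the original):

#   python train_vgg16.py --action train --model_id vgg16_cifar
#   python train_vgg16.py --action test --model_id vgg16_cifar --test_step 500
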
Example #4
# (reconstructed head of a truncated call: `train_datagen` and args["train_dir"]
#  are assumed by analogy with the validation generator below)
train_generator = train_datagen.flow_from_directory(args["train_dir"],
                                                    target_size=(img_size,
                                                                 img_size),
                                                    batch_size=batch_size,
                                                    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    args["val_dir"],
    target_size=(img_size, img_size),
    batch_size=batch_size,
    class_mode='categorical')

##### Step-3:
############ Create VGG-16 network graph without the last layers and load imagenet pretrained weights
############ Default image size is 160
print('loading the model and the pre-trained weights...')

base_model = VGG16.VGG16(include_top=False, weights='imagenet')
## Here we will print the layers in the network (they are also frozen so only the new top is trained)
for i, layer in enumerate(base_model.layers, start=1):
    layer.trainable = False
    print(i, layer.name)
#sys.exit()

##### Step-4:
############ Add the top as per number of classes in our dataset
############ Note that we are using a Dropout layer with a rate of 0.2, i.e. we randomly drop 20% of the units
############

x = base_model.output
x = Dense(128)(x)
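
The snippet is cut off after the first Dense layer. A sketch of how the head is typically finished, mirroring Examples #6 and #7 below (num_classes and the Dropout rate referenced in Step-4 are assumptions here, and the same keras.layers imports as Examples #6/#7 are assumed):

x = Dropout(0.2)(x)
x = GlobalAveragePooling2D()(x)
predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
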
Example #5
def main():

    tf.random.set_seed(22)

    print('loading data...')
    (x, y), (x_test, y_test) = datasets.cifar10.load_data()
    x, x_test = normalize(x, x_test)
    print(x.shape, y.shape, x_test.shape, y_test.shape)
    # x = tf.convert_to_tensor(x)
    # y = tf.convert_to_tensor(y)
    train_loader = tf.data.Dataset.from_tensor_slices((x, y))
    train_loader = train_loader.map(prepare_cifar).shuffle(50000).batch(256)

    test_loader = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    test_loader = test_loader.map(prepare_cifar).shuffle(10000).batch(256)
    print('done.')

    model = VGG16([32, 32, 3])

    # must specify from_logits=True!
    criteon = keras.losses.CategoricalCrossentropy(from_logits=True)
    metric = keras.metrics.CategoricalAccuracy()

    optimizer = optimizers.Adam(learning_rate=0.0001)

    for epoch in range(250):

        for step, (x, y) in enumerate(train_loader):
            # [b, 1] => [b]
            y = tf.squeeze(y, axis=1)
            # [b, 10]
            y = tf.one_hot(y, depth=10)

            with tf.GradientTape() as tape:
                logits = model(x)
                loss = criteon(y, logits)
                # loss2 = compute_loss(logits, tf.argmax(y, axis=1))
                # mse_loss = tf.reduce_sum(tf.square(y-logits))
                # print(y.shape, logits.shape)
                metric.update_state(y, logits)

            grads = tape.gradient(loss, model.trainable_variables)
            # MUST clip gradients here or training will diverge!
            grads = [tf.clip_by_norm(g, 15) for g in grads]
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 40 == 0:
                # for g in grads:
                #     print(tf.norm(g).numpy())

                print(epoch, step, 'loss:', float(loss), 'acc:',
                      metric.result().numpy())
                metric.reset_states()

        if epoch % 1 == 0:

            metric = keras.metrics.CategoricalAccuracy()
            for x, y in test_loader:
                # [b, 1] => [b]
                y = tf.squeeze(y, axis=1)
                # [b, 10]
                y = tf.one_hot(y, depth=10)

                logits = model.predict(x)
                # be careful: these metric functions silently accept y with shape [b] without warning
                metric.update_state(y, logits)
            print('test acc:', metric.result().numpy())
            metric.reset_states()
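
Both custom training loops clip every gradient tensor to norm 15 before applying it. A common alternative (an assumption, not what these examples do) is to clip the global norm across all gradients at once; the three update lines inside the loop would then read:

# alternative sketch: clip the global norm of all gradients together
grads = tape.gradient(loss, model.trainable_variables)
grads, _ = tf.clip_by_global_norm(grads, 15.0)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
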
Example #6
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-image",
                "--image",
                type=str,
                default='test.jpg',
                help="Path of test image")
ap.add_argument("-num_class",
                "--class",
                type=int,
                default=2,
                help="(required) number of classes to be trained")
args = vars(ap.parse_args())

base_model = VGG16.VGG16(include_top=False, weights=None)
x = base_model.output
x = Dense(128)(x)
x = GlobalAveragePooling2D()(x)
predictions = Dense(args["class"], activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=predictions)

model.load_weights("cv-tricks_fine_tuned_model.h5")

inputShape = (224, 224)  # Assumes 3 channel image
image = load_img(args["image"], target_size=inputShape)
image = img_to_array(image)  # shape is (224,224,3)
image = np.expand_dims(image, axis=0)  # Now shape is (1,224,224,3)

image = image / 255.0
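
The snippet stops after preprocessing. A minimal sketch of the inference step that would typically follow (class_labels is an assumed list mapping class indices to names, not defined in the original):

preds = model.predict(image)          # shape (1, num_classes)
top_idx = int(np.argmax(preds[0]))
print('predicted class:', class_labels[top_idx], 'probability:', float(preds[0][top_idx]))
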
Example #7
def view_results():
    base_model = VGG16.VGG16(include_top=False, weights=None)
    x = base_model.output
    x = Dense(128)(x)
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(101, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.load_weights("cv-tricks_fine_tuned_model.h5")

    input_shape = (224, 224)  # Assumes 3 channel image
    image = load_img(os.path.join(app.config['FOOD_FOLDER'], 'test_image.jpg'),
                     target_size=input_shape)
    image = img_to_array(image)  # shape is (224,224,3)
    image = np.expand_dims(image, axis=0)  # Now shape is (1,224,224,3)

    image = image / 255.0

    preds = model.predict(image)

    food_prob_array = preds[0]
    food_prob_list = food_prob_array.tolist()
    k.clear_session()

    owd = os.getcwd()
    os.chdir('D:\\Projects\\Python\\FineTunedImageRecognition\\training_data')
    all_subdir = [d for d in os.listdir('.') if os.path.isdir(d)]
    os.chdir(owd)

    food_dict = dict(zip(all_subdir, food_prob_list))

    sorted_d = sorted(food_dict.items(),
                      key=lambda item: item[1],
                      reverse=True)
    print(sorted_d)
    dish_name = string.capwords(str(sorted_d[0][0]).replace("_", " "))
    prob_dish = sorted_d[0][1]

    if prob_dish < app.config['FOOD_PROBABILITY_THRESHOLD']:
        error_message = "The image was not identified as any Dish. Please capture it again by clicking 'Take A Snapshot Again' button"
        return render_template('noResults.html', error_message=error_message)

    else:
        print("The dish was identified to be {} with {} probability".format(
            dish_name, prob_dish))
        nutrients, error = get_nutrients_data_from_usda(dish_name)
        if nutrients is not None:
            weight = request.args.get('weight')
            if not weight:
                weight = 100
            print("*********** " + str(weight) + " ****************")
            weight_multiplier = float(weight) / 100
            print(nutrients)
            for nutrient in nutrients:
                nutrient['value'] = str(
                    round(float(nutrient['value']) * weight_multiplier, 2))
        if error is not None:
            print(error)

        could_also_be_dish_name = sorted_d[1][0]
        could_also_be_dish_name_display_name = string.capwords(
            could_also_be_dish_name.replace("_", " "))
        #   print("The dish could also be {} or {} ".format(sorted_d[1][0], sorted_d[2][0]))
        return render_template(
            'viewResults.html',
            dish_name=dish_name,
            nutrients=nutrients,
            display_second=could_also_be_dish_name_display_name,
            second_dish=could_also_be_dish_name,
            error=error,
            weight=weight)
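
The ranking above is built by zipping the training subfolder names with the probabilities and sorting the resulting dict. An equivalent way (a sketch, not part of the original route) to pull the top predictions straight from the probability array:

top2 = np.argsort(food_prob_array)[::-1][:2]      # indices of the two most likely classes
top_names = [all_subdir[i] for i in top2]
top_probs = [float(food_prob_array[i]) for i in top2]
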
Example #8
def main():

    tf.random.set_seed(22)

    print('loading data...')
    (x, y), (x_test, y_test) = datasets.cifar10.load_data()
    x, x_test = normalize(x, x_test)
    print(x.shape, y.shape, x_test.shape, y_test.shape)
    # x = tf.convert_to_tensor(x)
    # y = tf.convert_to_tensor(y)
    train_loader = tf.data.Dataset.from_tensor_slices((x, y))
    train_loader = train_loader.map(prepare_cifar).shuffle(50000).batch(1024)

    test_loader = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    test_loader = test_loader.map(prepare_cifar).shuffle(10000).batch(256)
    print('done.')

    model = VGG16([32, 32, 3])

    criteon = keras.losses.CategoricalCrossentropy(from_logits=True)
    metric = keras.metrics.CategoricalAccuracy()
    optimizer = optimizers.Adam(learning_rate=0.0001)
    ckpt = tf.train.Checkpoint(step=tf.Variable(1),
                               optimizer=optimizer,
                               net=model)
    manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)

    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")

    for epoch in range(2500):
        for step, (x, y) in enumerate(train_loader):
            y = tf.squeeze(y, axis=1)
            y = tf.one_hot(y, depth=10)
            with tf.GradientTape() as tape:
                logits = model(x)
                loss = criteon(y, logits)
                metric.update_state(y, logits)
            grads = tape.gradient(loss, model.trainable_variables)
            # MUST clip gradients here or training will diverge!
            grads = [tf.clip_by_norm(g, 15) for g in grads]
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 40 == 0:
                print(epoch, step, 'loss:', float(loss), 'acc:',
                      metric.result().numpy())
                metric.reset_states()

        if epoch % 1 == 0:
            ckpt.step.assign_add(1)  # advance the checkpoint step counter before saving
            model.save_weights('easy_checkpoint')
            save_path = manager.save()
            print("Saved checkpoint for step {}: {}".format(
                int(ckpt.step), save_path))

            metric = keras.metrics.CategoricalAccuracy()
            for x, y in test_loader:
                # [b, 1] => [b]
                y = tf.squeeze(y, axis=1)
                # [b, 10]
                y = tf.one_hot(y, depth=10)

                logits = model.predict(x)
                # be careful: these metric functions silently accept y with shape [b] without warning
                metric.update_state(y, logits)
            print('test acc:', metric.result().numpy())
            metric.reset_states()
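
Because the loop writes both a CheckpointManager directory (./tf_ckpts) and plain weights ('easy_checkpoint'), either can be used to reload the model later. A minimal restore sketch, assuming the same VGG16 constructor:

model = VGG16([32, 32, 3])
ckpt = tf.train.Checkpoint(step=tf.Variable(1), net=model)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
if manager.latest_checkpoint:
    # expect_partial() silences warnings about the optimizer slots we are not restoring
    ckpt.restore(manager.latest_checkpoint).expect_partial()
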
Example #9
def main(args):

    parser = argparse.ArgumentParser()

    parser.add_argument('run_name', metavar='N', type=str, help='name of run')
    parser.add_argument('network_type',
                        metavar='N',
                        type=str,
                        help='type of network to use')
    parser.add_argument('gpu_id',
                        metavar='G',
                        type=str,
                        help='which gpu to use')
    parser.add_argument('--print_every',
                        metavar='N',
                        type=int,
                        help='number of iterations before printing',
                        default=-1)
    parser.add_argument('--print_network',
                        action='store_true',
                        help='print_network for debugging')
    parser.add_argument('--data_parallel',
                        type=int,
                        nargs='+',
                        default=None,
                        help='parallelize across multiple GPUs')

    parser.add_argument('--test', action='store_true', help='test')
    parser.add_argument('--test_print', action='store_true', help='test')
    parser.add_argument('--valid_iters',
                        metavar='I',
                        type=int,
                        default=100,
                        help='number of validation iters to run every epoch')
    parser.add_argument('--csv_file',
                        metavar='CSV',
                        type=str,
                        default=None,
                        help='name of csv file to write to')

    parser.add_argument(
        '--sweep_lambda',
        action='store_true',
        help=
        'perform a sweep over lambda values, keeping other settings as in args')
    parser.add_argument(
        '--sweep_c',
        action='store_true',
        help=
        'perform a sweep over c values, keeping other settings as in args')
    parser.add_argument('--sweep_start',
                        metavar='S',
                        type=float,
                        default=0.0,
                        help='lambda value to start sweep at')
    parser.add_argument('--sweep_stop',
                        metavar='E',
                        type=float,
                        default=0.1,
                        help='lambda value to stop sweep at')
    parser.add_argument('--sweep_step',
                        metavar='E',
                        type=float,
                        default=0.01,
                        help='step_size_between_sweep_points')
    parser.add_argument('--sweep_exp',
                        action='store_true',
                        help='use multiplicative (exponential) steps between sweep points')
    parser.add_argument('--sweep_resume',
                        action='store_true',
                        help='resume sweep checkpts')
    parser.add_argument(
        '--sweep_con_runs',
        metavar='C',
        type=int,
        default=1,
        help='number of runs to run with the same parameters to validate consistency'
    )

    #checkpoints
    parser.add_argument('--checkpoint_every',
                        type=int,
                        default=10,
                        help='checkpoint every n epochs')
    parser.add_argument('--load_checkpoint',
                        action='store_true',
                        help='load checkpoint with same name')
    parser.add_argument('--resume',
                        action='store_true',
                        help='resume from epoch we left off of when loading')
    parser.add_argument('--checkpoint',
                        type=str,
                        default=None,
                        help='checkpoint to load')

    #params
    parser.add_argument('--epochs',
                        metavar='N',
                        type=int,
                        help='number of epochs to run for',
                        default=50)
    parser.add_argument('--batch_size',
                        metavar='bs',
                        type=int,
                        default=1024,
                        help='batch size')
    parser.add_argument('--lr',
                        metavar='lr',
                        type=float,
                        help='learning rate',
                        default=1e-3)
    parser.add_argument('--rmsprop',
                        action='store_true',
                        help='use rmsprop optimizer')
    parser.add_argument('--sgd', action='store_true', help='use sgd optimizer')
    parser.add_argument('--lr_reduce_on_plateau',
                        action='store_true',
                        help='update optimizer on plateau')
    parser.add_argument('--lr_exp',
                        action='store_true',
                        help='use an exponential learning-rate decay schedule')
    parser.add_argument('--lr_step',
                        type=int,
                        nargs='+',
                        default=None,
                        help='decrease lr by gamma = 0.1 on these epochs')
    parser.add_argument('--lr_list',
                        type=float,
                        nargs='+',
                        default=None,
                        help='explicit list of learning rates to use')

    parser.add_argument('--l2_reg', type=float, default=0.0)

    DataLoader.add_args(parser)

    #added so default argument come from the network that is loaded
    network_type = args[1]
    if network_type == 'auto_fc':
        AutoFCNetwork.add_args(parser)
        network_class = AutoFCNetwork
    elif network_type == 'auto_conv':
        AutoConvNetwork.add_args(parser)
        network_class = AutoConvNetwork
    elif network_type == 'class_conv':
        ClassifyConvNetwork.add_args(parser)
        network_class = ClassifyConvNetwork
    elif network_type == 'vgg':
        VGG16.add_args(parser)
        network_class = VGG16
    elif network_type == 'res':
        ResidualConvNetwork.add_args(parser)
        network_class = ResidualConvNetwork
    elif network_type == 'res152':
        ResNet152.add_args(parser)
        network_class = ResNet152
    elif network_type == 'dense':
        DenseNet.add_args(parser)
        network_class = DenseNet
    else:
        raise ValueError('unknown network type: ' + str(network_type))

    args = parser.parse_args(args)

    #***************
    # GPU
    #***************

    if args.data_parallel is not None:
        try:
            del os.environ['CUDA_VISIBLE_DEVICES']
        except KeyError:
            pass
        device = torch.device('cuda:%d' % args.data_parallel[0])
    elif args.gpu_id == '-1':
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        device = torch.device('cpu')
    else:
        print(bcolors.OKBLUE + 'Using GPU ' + str(args.gpu_id) + bcolors.ENDC)
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
        device = torch.device('cuda')

    #***************
    # Run
    #***************

    if args.sweep_lambda or args.sweep_c:
        run_dir = args.run_name
        run_ckpt_dir = 'ckpts/%s' % run_dir
        if not os.path.isdir(run_ckpt_dir):
            os.mkdir(run_ckpt_dir)
        val_l = []
        loss_l = []
        op_loss_l = []
        reg_loss_l = []
        nodes_l = []
        images_l = []

        val = args.sweep_start
        while val <= args.sweep_stop:
            print(bcolors.OKBLUE + 'Val: %0.1E' % val + bcolors.ENDC)
            args.run_name = run_name = "%s/l%0.1E" % (run_dir, val)

            for i in range(args.sweep_con_runs):
                if args.sweep_con_runs > 0:
                    args.run_name = run_name + '_' + str(i)

                if not (args.sweep_resume
                        and os.path.isdir('ckpts/' + args.run_name)):

                    print(bcolors.OKBLUE + 'Run: %s' % args.run_name +
                          bcolors.ENDC)

                    if args.sweep_lambda:
                        args.reg_lambda = float(val)
                    elif args.sweep_c:
                        args.reg_c = float(val)
                    run_wrapper = RunWrapper(args, network_class, device)

                    if not args.test:
                        run_wrapper.train()

                    loss, op_loss, reg_loss, rem_nodes, acc = run_wrapper.test(
                        load=args.test)
                    if isinstance(rem_nodes, list):
                        rem_nodes = sum(rem_nodes)
                    x, y_hat = run_wrapper.test_print(plot=False, load=False)

                    val_l.append(val)
                    loss_l.append(loss)
                    op_loss_l.append(op_loss)
                    reg_loss_l.append(reg_loss)
                    nodes_l.append(rem_nodes)
                    if images_l == []:
                        images_l.append(x[:17])
                    images_l.append(y_hat[:17])

                    #hopefully this cleans up the gpu memory
                    del run_wrapper

            if args.sweep_exp:
                if val == 0.0:
                    val = args.sweep_start
                else:
                    val = val * args.sweep_step
            else:
                val = val + args.sweep_step

        loss_l = np.array(loss_l)
        op_loss_l = np.array(op_loss_l)
        reg_loss_l = np.array(reg_loss_l)
        nodes_l = np.array(nodes_l)
        if args.sweep_lambda:
            lc_l = val_l  #[l * args.reg_C for l in val_l]
        elif args.sweep_c:
            lc_l = [c * args.reg_lambda for c in val_l]

        print(lc_l)
        print(nodes_l)

        misc.plot_sweep(run_ckpt_dir, lc_l, op_loss_l, nodes_l)
        #misc.sweep_to_image(images_l, run_ckpt_dir)

    else:
        #default single run behavior
        run_wrapper = RunWrapper(args, network_class, device)

        if args.test_print:
            run_wrapper.test_print()
        elif args.test:
            run_wrapper.test()
        else:
            run_wrapper.train()
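
A hypothetical invocation (script name, run names, and values are assumptions, not from the original):

# single run on GPU 0
#   python run.py my_vgg_run vgg 0 --epochs 50 --lr 1e-3
# exponential sweep over lambda values
#   python run.py my_sweep vgg 0 --sweep_lambda --sweep_start 1e-4 --sweep_stop 1e-1 --sweep_step 10 --sweep_exp
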
Example #10
def main():

    args = parser.parse_args()

    tf.random.set_seed(22)
    print('loading data...')
    (x, y), (x_test_ori, y_test) = datasets.cifar10.load_data()
    X_train = x / 255.
    X_test_normal = x_test_ori / 255.
    mean = np.mean(X_train, axis=(0, 1, 2, 3))
    std = np.std(X_train, axis=(0, 1, 2, 3))
    print('mean:', mean, 'std:', std)

    model = VGG16([32, 32, 3])

    model.load_weights('easy_checkpoint')

    test_image_dir = args.imagedir
    print(test_image_dir)

    for root, dirs, files in os.walk(test_image_dir, topdown=False):
        for name in files:
            file_name = os.path.join(root, name)
            print(file_name)
            test_image = cv2.imread(file_name)
            test_image = test_image / 255.
            test_image_one = (test_image - mean) / (std + 1e-7)
            input_x = np.expand_dims(test_image_one, 0)
            logits = model.predict(input_x)
            result_index = np.argmax(logits)
            print('predict value: ' + str(cifar10_labels[result_index]) + '\n')

    exit(0)  # early exit: the dataset-wide evaluation below is currently skipped

    img_index_all = 0
    img_index_err = 0
    x_test_dataset = (X_test_normal - mean) / (std + 1e-7)
    test_loader = tf.data.Dataset.from_tensor_slices((x_test_dataset, y_test))
    test_loader = test_loader.map(prepare_cifar).shuffle(10000).batch(1)
    for x_test_input, y in test_loader:
        # [b, 1] => [b]
        y = tf.squeeze(y, axis=1)
        print('true label: ' + str(cifar10_labels[int(y[0])]))
        # # [b, 10]
        # y = tf.one_hot(y, depth=10)

        logits = model.predict(x_test_input)
        result_index = np.argmax(logits)
        print('predict label: ' + str(cifar10_labels[result_index]) + '\n')
        if result_index != y[0]:
            x_test_ori = (x_test_input * (std + 1e-7) + mean) * 255
            cv2.imwrite(
                'error_result/{}_true_label_{}_predict_{}.jpg'.format(
                    img_index_err, cifar10_labels[int(y[0])],
                    cifar10_labels[result_index]), x_test_ori[0].numpy())
            img_index_err = img_index_err + 1
        else:
            x_test_ori = (x_test_input * (std + 1e-7) + mean) * 255
            cv2.imwrite(
                'good_result/{}_true_label_{}_predict_{}.jpg'.format(
                    img_index_all, cifar10_labels[int(y[0])],
                    cifar10_labels[result_index]), x_test_ori[0].numpy())
        img_index_all = img_index_all + 1
    print('total images: {}  mispredicted: {}'.format(img_index_all, img_index_err))
    exit(0)
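
Example #10 indexes a cifar10_labels sequence that is never defined in the snippet. A sketch of the list it presumably holds, using the standard CIFAR-10 class order:

cifar10_labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                  'dog', 'frog', 'horse', 'ship', 'truck']
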