Code Example #1
# Assumed imports: DenseNet here is a local implementation
# (e.g. the flyyufelix/DenseNet-Keras port), not keras.applications.DenseNet.
from keras import optimizers

def get_densenet():
    model = DenseNet(nb_dense_block=2, classes=1)
    model.summary()

    # compile model
    print('compile model...')
    optimizer = optimizers.Adam()
    model.compile(loss='mean_squared_error', optimizer=optimizer)

    return model
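A minimal usage sketch for the function above, assuming the local DenseNet accepts 224x224 RGB inputs; the dummy data and shapes are assumptions, not part of the original:

import numpy as np

model = get_densenet()

# Dummy regression data purely to exercise the training loop (shapes assumed)
x = np.random.rand(8, 224, 224, 3).astype('float32')
y = np.random.rand(8, 1).astype('float32')
model.fit(x, y, batch_size=4, epochs=1)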
Code Example #2
import warnings

import numpy as np
from keras import backend as K
from keras.optimizers import SGD
# DenseNet is assumed to come from a local implementation that accepts a
# `weights_path` argument (not from keras.applications).

# `im` is assumed to already hold one RGB image as an (H, W, 3) array.
if K.image_dim_ordering() == 'th':
  # Transpose image dimensions (Theano uses the channels as the 1st dimension)
  im = im.transpose((2, 0, 1))

  # Use pre-trained weights for Theano backend
  weights_path = 'imagenet_models/densenet121_weights_th.h5'
else:
  # Use pre-trained weights for Tensorflow backend
  weights_path = 'imagenet_models/densenet121_weights_tf.h5'

# Insert a new dimension for the batch_size
im = np.expand_dims(im, axis=0)

# Test pretrained model
model = DenseNet(reduction=0.5, classes=1000, weights_path=weights_path)

# Compiling is only required for training/evaluation, not for predict()
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

out = model.predict(im)

# Suppress noisy warnings matching this message pattern
warnings.filterwarnings('ignore', '.*do not.*')
# Load ImageNet classes file
classes = []
with open('resources/classes.txt', 'r') as list_:
    for line in list_:
        classes.append(line.rstrip('\n'))

print('Prediction: ' + classes[np.argmax(out)])
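The snippet above uses `im` before it is fully defined; a plausible sketch of the elided loading and mean-centering step (the file path, resize size, and BGR channel means are assumptions based on common Caffe-style ImageNet ports):

import cv2
import numpy as np

# Hypothetical input path; resize to the network's assumed 224x224 input
im = cv2.resize(cv2.imread('resources/cat.jpg'), (224, 224)).astype(np.float32)
# Subtract assumed ImageNet BGR channel means (Caffe-style preprocessing)
im[:, :, 0] -= 103.939
im[:, :, 1] -= 116.779
im[:, :, 2] -= 123.68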
Code Example #3
import csv
import os
import os.path as osp
import sys
from ast import literal_eval

import cv2
import numpy as np
from PIL import Image
from keras.optimizers import SGD
# Project-specific helpers assumed to be importable alongside this script:
# DenseNet, RGBPreprocess, mean_subtraction, get_arguments, cfg, INVALID_GRID_TYPE.


def main():
    # create directory to save the preprocessed images
    if not os.path.exists('preprocessed_images'):
        os.mkdir('preprocessed_images')
        
    print("Reading Arguments: ")
    args = get_arguments()

    try:
        with open(args.img_list) as f:
            images = f.readlines()
    except Exception as e:
        print("Unable to read the image list")
        print(e)
        sys.exit(1)

    print("Processing {} images.".format(len(images)))

    # CSV header: image name plus one column per grid cell (1-12)
    header = ['image_name']
    header.extend(list(range(1, 13)))
    rows = []

    # Initialize the pretrained model
    print("Intializing the pretrained model")
    model = DenseNet(reduction=0.5, classes=cfg.num_classes, weights_path=cfg.weights_path)
    sgd = SGD(lr=1e-2, decay=1e-4, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    print("Model loaded sucessfully!")

    for img_name in images:
        img_name = img_name.strip()
        image_row = dict()
        image_row['image_name'] = img_name.split('/')[-1]
        image = cv2.imread(img_name)

        # Rotate the image by 180 degrees about its center
        (h, w) = image.shape[:2]
        center = (w / 2, h / 2)
        M = cv2.getRotationMatrix2D(center, 180, 1.0)
        image = cv2.warpAffine(image, M, (w, h))

        # Save the rotated image under preprocessed_images/
        im_name = osp.join('preprocessed_images', img_name.split('/')[-1])
        cv2.imwrite(im_name, image)

        if args.grid_type == 12:
            gridw, gridh = cfg.GRIDW, cfg.GRIDH
        else:
            raise ValueError(INVALID_GRID_TYPE)

        # Pre-process the image.
        crop_dims = args.crop_dims
        if isinstance(crop_dims, str):
            # Parse a string such as "(x1, y1, x2, y2)" safely instead of eval()
            crop_dims = literal_eval(crop_dims)
        rgb = RGBPreprocess(crop_dims)
        data = rgb.process_img(cv2.cvtColor(image, cv2.COLOR_BGR2RGB), image_row['image_name'], args.debug, gridh, gridw)

        # run for each grid location.
        for i, im in enumerate(data):
            # preprocess the image
            im = Image.fromarray(im)
            im = im.resize((224, 224), Image.LANCZOS)
            im = np.asarray(im, np.float64)
            im = np.expand_dims(im, axis=0)
            im = mean_subtraction(im)
                
            # Run prediction
            out = model.predict(im)
            predict = np.argmax(out)

            # write prediction to the row dictionary.
            image_row[i+1] = cfg.class_names[predict]
        rows.append(image_row)

    # write to csv
    with open(args.output_file, 'w', newline='') as csvf:
        writer = csv.DictWriter(csvf, fieldnames=header)
        writer.writeheader()
        for row in rows:
            writer.writerow(row)
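The script relies on a project-specific get_arguments() helper. Below is a hypothetical argparse sketch covering only the attributes referenced in main(); every flag name and default is an assumption:

import argparse

def get_arguments():
    # Hypothetical reconstruction: only the attributes used in main() are known
    parser = argparse.ArgumentParser(description='Grid-wise DenseNet prediction')
    parser.add_argument('--img_list', required=True,
                        help='text file with one image path per line')
    parser.add_argument('--output_file', default='predictions.csv',
                        help='CSV file for the results')
    parser.add_argument('--grid_type', type=int, default=12,
                        help='number of grid cells per image')
    parser.add_argument('--crop_dims', default='(0, 0, 224, 224)',
                        help='crop dimensions as a tuple string (assumed format)')
    parser.add_argument('--debug', action='store_true',
                        help='enable debug output in RGBPreprocess')
    return parser.parse_args()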
Code Example #4
from time import time

import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.optimizers import SGD
# `model`, `x_train`, `y_train`, `x_test` and `y_test` are assumed to be
# defined earlier in the script.

weight_path = "{}_weights.best.hdf5".format('model')
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
earlystopping = EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=16,
                              verbose=0,
                              mode='auto')
checkpoint = ModelCheckpoint(weight_path,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             save_weights_only=True)
tensorboard = TensorBoard(log_dir="./logs/{}".format(time()))

model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

hist = model.fit(x_train,
                 y_train,
                 batch_size=128,
                 epochs=50,
                 callbacks=[earlystopping, checkpoint, tensorboard],
                 validation_split=0.3)
test_loss, test_acc = model.evaluate(x_test, y_test,
                                     batch_size=128)  # original batch_size was 32
# print(hist.history)

# Plot training & validation accuracy values
# Note: newer Keras/TF versions name these history keys 'accuracy'/'val_accuracy'
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()
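Since ModelCheckpoint saves only the best weights, a reasonable follow-up sketch (assuming the checkpoint file was written during training) is to restore them before the final evaluation:

# Restore the best checkpoint before the final evaluation
model.load_weights(weight_path)
test_loss, test_acc = model.evaluate(x_test, y_test, batch_size=128)
print('Best-weights test accuracy: {:.4f}'.format(test_acc))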