Example #1
# Assumed imports for this snippet (not shown in the source); `prepro_inp`
# is presumably the model's preprocess_input helper:
#   from keras.applications.inception_v3 import InceptionV3, preprocess_input as prepro_inp
#   from keras.preprocessing import image
#   import numpy as np
def get_model(test_image_path):
    # Build a pretrained InceptionV3 and preprocess one test image for it.
    my_model = InceptionV3()
    img = image.load_img(test_image_path, target_size=(299, 299))  # InceptionV3 input size
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add a batch dimension
    x = prepro_inp(x)              # model-specific input preprocessing
    x = np.vstack([x])             # no-op for a single already-batched array

    return x[0], my_model
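A minimal usage sketch (assumed, not part of the source: the image path is a placeholder, and `decode_predictions` is the stock Keras helper):

from keras.applications.inception_v3 import decode_predictions

x, model = get_model('test.jpg')                  # placeholder path
preds = model.predict(np.expand_dims(x, axis=0))  # restore the batch dimension
for class_id, name, score in decode_predictions(preds, top=3)[0]:
    print(name, score)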
Example #2
def main():
    # Assumes the usual imports (not shown): torch, torch.nn as nn,
    # torch.optim as optim, torch.utils.data as data_utils, numpy as np, os.
    gpu_num = torch.cuda.device_count()  # scale the batch size with the GPU count
    train_loader = data_utils.DataLoader(dataset=DataProvider(),
                                         batch_size=60 * gpu_num,
                                         num_workers=18,
                                         worker_init_fn=worker_init_fn)
    val_loader = data_utils.DataLoader(dataset=DataProvider(val=True),
                                       batch_size=60 * gpu_num,
                                       num_workers=18,
                                       worker_init_fn=worker_init_fn)
    best_acc = 0
    model = InceptionV3().cuda()
    model = nn.DataParallel(model)
    # optimizer = optim.Adam(model.module.parameters(), lr=1e-4)
    optimizer = optim.RMSprop(
        model.module.parameters(),
        lr=0.05 / 2,
        alpha=0.9,
        eps=1.0,  # large epsilon, as in the original Inception training setup
        momentum=0.9,
    )  # weight_decay=0.5)

    criterion = nn.CrossEntropyLoss()
    start_epoch = 0
    resume = 'model_best.pth.tar'
    if os.path.isfile(resume):
        checkpoint = torch.load(resume)
        start_epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            resume, checkpoint['epoch']))

    lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=int(2e6 / len(train_loader)),
        gamma=0.5,
    )
    # last_epoch=start_epoch)
    for epoch in range(start_epoch, 500):
        lr_scheduler.step()  # pre-1.1 PyTorch convention: step the scheduler at epoch start
        np.random.seed()     # reseed NumPy each epoch so augmentations don't repeat
        train(model, optimizer, criterion, train_loader, epoch)
        acc = val(model, criterion, val_loader, epoch)
        is_best = acc > best_acc
        best_acc = max(acc, best_acc)
        if epoch % 2 == 0:  # NB: a best model reached on an odd epoch isn't saved immediately
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    # 'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict(),
                },
                is_best)
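`save_checkpoint` itself is not shown; a sketch of the usual PyTorch pattern it appears to follow (the 'model_best.pth.tar' name matches the resume logic above):

import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # Save the latest state; keep a separate copy of the best model so far.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')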
Example #3
def main():
    train_loader = data_utils.DataLoader(dataset=DataProvider(),
                                         batch_size=120,
                                         num_workers=18,
                                         worker_init_fn=worker_init_fn)
    val_loader = data_utils.DataLoader(dataset=DataProvider(val=True),
                                       batch_size=120,
                                       num_workers=18,
                                       worker_init_fn=worker_init_fn)
    best_acc = 0
    model = InceptionV3().cuda()
    model = nn.DataParallel(model)
    # optimizer = optim.Adam(model.module.parameters(), lr=1e-4)
    optimizer = optim.RMSprop(model.module.parameters(),
                              lr=0.05 / 2,
                              momentum=0.9,
                              weight_decay=0.5)

    criterion = nn.CrossEntropyLoss()
    start_epoch = 0
    resume = 'model_best.pth.tar'
    if os.path.isfile(resume):
        checkpoint = torch.load(resume)
        start_epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            resume, checkpoint['epoch']))

    lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=int(2e6 / len(train_loader)),
        gamma=0.5,
    )
    # last_epoch=start_epoch)
    for epoch in range(start_epoch, 500):
        lr_scheduler.step()
        np.random.seed()
        # Training is disabled in this variant; only validation runs.
        # train(model, optimizer, criterion, train_loader, epoch)
        val(model, criterion, val_loader, epoch)
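Both examples pass a `worker_init_fn` that is not defined in the snippets; a typical implementation, assuming its purpose is to de-correlate NumPy randomness across DataLoader workers:

import numpy as np
import torch

def worker_init_fn(worker_id):
    # Forked workers inherit identical NumPy RNG state; derive a distinct
    # seed per worker from the per-worker torch seed.
    np.random.seed((torch.initial_seed() + worker_id) % 2**32)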
Example #4
def predict(test_image_path):
    my_model = InceptionV3()
    my_model.model.summary()
    # my_model = inc_net.InceptionV3()

    # data = Dataset()
    # data.load_data_tfrecord()
    # print(data.train_data)
    # my_model.train(data, epochs=2)

    img = image.load_img(test_image_path, target_size=(299, 299))  # InceptionV3 input size
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add a batch dimension
    x = prepro_inp(x)              # model-specific input preprocessing
    x = np.vstack([x])             # no-op for a single already-batched array

    prediction = my_model.predict(x)

    # for i in my_model.decode_predict(prediction):
    #     print(i)
    return x, my_model.decode_predict(prediction), my_model
Example #5
def main():
    """
    Script entrypoint
    """
    t_start = datetime.now()
    header = ["Start Time", "End Time", "Duration (s)"]
    row = [t_start.strftime(DEFAULT_DATE_TIME_FORMAT)]

    dnn = InceptionV3()

    # show class indices
    print('****************')
    for cls, idx in dnn.train_batches.class_indices.items():
        print('Class #{} = {}'.format(idx, cls))
    print('****************')

    dnn.model.summary()  # summary() prints itself; wrapping it in print() adds a stray "None"

    dnn.train(t_start,
              epochs=dnn.num_epochs,
              batch_size=dnn.batch_size,
              training=dnn.train_batches,
              validation=dnn.valid_batches)

    # save trained weights
    dnn.model.save(dnn.file_weights + 'old')

    dnn.model.save_weights(dnn.file_weights)
    with open(dnn.file_architecture, 'w') as f:
        f.write(dnn.model.to_json())

    t_end = datetime.now()
    difference_in_seconds = get_difference_in_seconds(t_start, t_end)

    row.append(t_end.strftime(DEFAULT_DATE_TIME_FORMAT))
    row.append(str(difference_in_seconds))

    append_row_to_csv(complete_run_timing_file, header)
    append_row_to_csv(complete_run_timing_file, row)
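The timing helpers are not shown; minimal implementations consistent with how they are called (a sketch; `complete_run_timing_file` is defined elsewhere in the source):

import csv

def get_difference_in_seconds(t_start, t_end):
    # Elapsed wall-clock time between two datetime objects, in seconds.
    return int((t_end - t_start).total_seconds())

def append_row_to_csv(file_path, row):
    # Append one row; the caller writes the header row first, then the data row.
    with open(file_path, 'a', newline='') as f:
        csv.writer(f).writerow(row)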
Example #6
# The top of this snippet is truncated in the source; a minimal reconstruction
# of the augmentation generator the fragment belongs to (the function name and
# arguments are assumptions; assumes keras.preprocessing.image.ImageDataGenerator):
def augment_generator(X_train, y_train, batch_size):
    while True:
        X_train, y_train = next(
            ImageDataGenerator(horizontal_flip=True,
                               vertical_flip=True,
                               brightness_range=(0.3, 1.)).flow(
                                   X_train,
                                   y=y_train,
                                   batch_size=batch_size))
        yield X_train, y_train


import gc
import psutil
from model import find_patches_from_slide, predict_batch_from_model, predict_from_model, InceptionV3

model = InceptionV3(include_top=True,
                    weights='i_3.h5',
                    input_tensor=None,
                    input_shape=(256, 256, 3),
                    pooling=None,
                    classes=2)

#model = simple_model(pretrained_weights='s_1.h5')


# # Data Path Load
def read_data_path():
    image_paths = []
    with open('train.txt', 'r') as f:
        for line in f:
            line = line.rstrip('\n')
            image_paths.append(line)
    #print('image_path # : ',len(image_paths))
    "attended_lectures": 0
}, {
    "sub_name": "SDL_TUT",
    "total_lectures": 0,
    "attended_lectures": 0
}, {
    "sub_name": "SDL",
    "total_lectures": 0,
    "attended_lectures": 0
}]

client = pymongo.MongoClient(host='localhost', port=27017)
db = client.dbms_database
print('Database Created')
#print(client.list_database_names())
model = InceptionV3()


def insert_student(s):

    db.students.insert_one(s)
    di = {"uid": s['uid'], "subjects": general}

    db.subjects.insert_one(di)

    return True


def check_if_face(imagepath):
    image = cv2.imread(imagepath)
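`check_if_face` is cut off in the source right after loading the image; a minimal completion using OpenCV's bundled Haar cascade (an assumption about the method, chosen only to match the function's name):

import cv2

def check_if_face(imagepath):
    image = cv2.imread(imagepath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    # Return True if at least one face region is detected.
    return len(cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)) > 0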
Example #8
parser = argparse.ArgumentParser("InceptionV3 model.")
parser.add_argument('-c', "--checkpoint_file", default="./inception_v3.ckpt")
parser.add_argument('-b', "--batch_size", default=32, type=int)
args = parser.parse_args([])  # parse an empty argv: keep the defaults (notebook-friendly)

batch_shape = [args.batch_size, 299, 299, 3]
#image_true_labels = pd.read_csv("images/images.csv")
Id2name = pd.read_csv("images/categories.csv")
#image_lc = image_true_labels[['TrueLabel','ImageId']]\
#    .merge(image_true_cats,'left',left_on='TrueLabel',right_on='CategoryId')[['TrueLabel','CategoryName','ImageId']]
image_lc = pd.read_csv("images/images_all.csv")
image_iterator = load_images('images/images', batch_shape)

sess = tf.Session()
inceptionV3 = InceptionV3(num_classes=1001)
inceptionV3.restore(args.checkpoint_file, sess)

#adversarial generating
gen_adversarial_sample = fgm(inceptionV3, eps=0.01, clip_min=-1, clip_max=1)
sm = saliency_map(inceptionV3)

writer = tf.summary.FileWriter('./graphs', sess.graph)
writer.close()

predictedIds = []
adversarialPredictedIds = []
imageIds = []
for filenames, images in image_iterator:
    print("current batch size:", len(filenames))
    adversarial_samples = sess.run(gen_adversarial_sample,
                                   ...)  # remaining arguments truncated in the source
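`load_images` is not defined in the snippet; code in this style (NIPS-2017 adversarial-examples tooling) typically uses a generator that yields (filenames, batch) pairs, sketched here under the assumption that pixels are scaled to the [-1, 1] range implied by `clip_min=-1, clip_max=1` above:

import os
import numpy as np
from PIL import Image

def load_images(input_dir, batch_shape):
    # Yield (filenames, images) batches with pixel values in [-1, 1].
    images = np.zeros(batch_shape)
    filenames = []
    idx = 0
    for name in sorted(os.listdir(input_dir)):
        img = Image.open(os.path.join(input_dir, name)).convert('RGB')
        img = img.resize((batch_shape[2], batch_shape[1]))
        images[idx] = np.asarray(img, dtype=np.float64) / 127.5 - 1.0
        filenames.append(name)
        idx += 1
        if idx == batch_shape[0]:
            yield filenames, images
            images = np.zeros(batch_shape)
            filenames, idx = [], 0
    if idx > 0:
        yield filenames, images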
Example #9
def main(args):
  logger = init_logger(args.run_name)

  # Datasets
  img_height, img_width, _ = InceptionV3.SHAPE

  def prep_func(f, x, y):
    x = read_image(x)
    x = decode_png(x)
    x = resize(x, img_height, img_width)
    return f, x, y

  # NB: all three splits point at args.train_dir in the source; validation and
  # test would normally come from their own directories.
  trn_ds = make_dataset(args.train_dir, args.batch_size, prep_func,
                        shuffle=True, repeat=True, add_filenames=True)
  val_ds = make_dataset(args.train_dir, args.batch_size, prep_func,
                        shuffle=False, repeat=False, add_filenames=True)
  tst_ds = make_dataset(args.train_dir, args.batch_size, prep_func,
                        shuffle=False, repeat=False, add_filenames=True)

  num_classes = len(trn_ds.labels_map)

  it = tf.data.Iterator.from_structure(
    trn_ds.dataset.output_types, trn_ds.dataset.output_shapes)

  num_trn_batches = int(math.ceil(float(trn_ds.size) / args.batch_size))
  num_val_batches = int(math.ceil(float(val_ds.size) / args.batch_size))
  num_tst_batches = int(math.ceil(float(tst_ds.size) / args.batch_size))

  trn_init_op = it.make_initializer(trn_ds.dataset)
  val_init_op = it.make_initializer(val_ds.dataset)
  tst_init_op = it.make_initializer(tst_ds.dataset)

  # Filename, input image and corresponding one-hot encoded label
  f, x, y = it.get_next()

  sess = tf.Session()

  # Model and logits
  is_training = tf.placeholder(dtype=tf.bool)
  model = InceptionV3(nb_classes=num_classes, is_training=is_training)
  logits = model.get_logits(x)

  attacks_ord = {
    'inf': np.inf,
    '1': 1,
    '2': 2
  }

  # FGM attack
  attack_params = {
    'eps': args.eps,
    'clip_min': 0.0,
    'clip_max': 1.0,
    'ord': attacks_ord[args.ord],
  }
  attack = FastGradientMethod(model, sess)

  # Learning rate with exponential decay
  global_step = tf.Variable(0, trainable=False)
  global_step_update_op = tf.assign(global_step, tf.add(global_step, 1))
  lr = tf.train.exponential_decay(
    args.initial_lr, global_step, args.lr_decay_steps,
    args.lr_decay_factor, staircase=True)

  cross_entropy = CrossEntropy(model, attack=attack,
                               smoothing=args.label_smth,
                               attack_params=attack_params,
                               adv_coeff=args.adv_coeff)
  loss = cross_entropy.fprop(x, y)

  # Gradients clipping
  opt = tf.train.RMSPropOptimizer(learning_rate=lr, decay=args.opt_decay,
                                  epsilon=1.0)
  gvs = opt.compute_gradients(loss)
  clip_min, clip_max = -args.grad_clip, args.grad_clip

  capped_gvs = []
  for g, v in gvs:
    capped_g = tf.clip_by_value(g, clip_min, clip_max) \
      if g is not None else tf.zeros_like(v)
    capped_gvs.append((capped_g, v))

  train_op = opt.apply_gradients(capped_gvs)

  saver = tf.train.Saver()
  global_init_op = tf.global_variables_initializer()

  if args.load_model and args.restore_path:
    saver.restore(sess, args.restore_path)
    logger.info("Model restored from: ".format(args.restore_path))


  with sess.as_default():
    sess.run(global_init_op)

    best_val_acc = -1
    for epoch in range(args.num_epochs):
      logger.info("Epoch: {:04d}/{:04d}".format(epoch + 1, args.num_epochs))
      sess.run(trn_init_op)

      for batch in range(num_trn_batches):
        loss_np, lr_np, _ = sess.run([loss, lr, train_op],
                                     feed_dict={is_training: True})
        logger.info("Batch: {:04d}/{:04d}, loss: {:.05f}, lr: {:.05f}"
          .format(batch + 1, num_trn_batches, loss_np, lr_np))

      logger.info("Epoch completed...")

      sess.run(global_step_update_op)
      val_acc = eval_acc(sess, logits, y, num_val_batches,
                         is_training, val_init_op)
      logger.info("Validation set accuracy: {:.05f}".format(val_acc))

      if best_val_acc < val_acc:
        output_path = saver.save(sess, args.model_path)
        logger.info("Model was successfully saved: {}".format(output_path))
        best_val_acc = val_acc

    tst_acc = eval_acc(sess, logits, y, num_tst_batches,
                       is_training, tst_init_op)
    logger.info("Test set accuracy: {:.05f}".format(tst_acc))