# Example #1
# 0
def main(args):
  # Restore a trained Milk model from the experiment snapshot and draw
  # t-SNE projections of its intermediate feature spaces, colored by the
  # per-instance attention weights.
  snapshot = os.path.join(args.exphome, 'save', '{}.h5'.format(args.timestamp))
  assert os.path.exists(snapshot)

  model = Milk(input_shape=(args.bag_size, args.x_size, args.x_size, 3),
               encoder_args=deep_encoder_args,
               use_gate=True,
               mode=args.mil,
               deep_classifier=True)
  model.summary()

  def _truncated_at(layer_name):
    # Sub-model sharing the full graph up to `layer_name`; snapshot weights
    # are matched by layer name.
    sub = tf.keras.Model(inputs=model.input,
                         outputs=model.get_layer(layer_name).output)
    sub.load_weights(snapshot, by_name=True)
    return sub

  enc_model = _truncated_at('encoder_glob_pool')
  deep_model = _truncated_at('deep_4')
  cls_model = _truncated_at('mil_dense_2')
  att_model = _truncated_at('att_2')

  data = gen_data(os.path.join(args.exphome, 'test_lists',
                               '{}.txt'.format(args.timestamp)),
                  bag_size=args.bag_size)

  fig = plt.figure(figsize=(3, 3), dpi=300)

  attentions = get_features(att_model, data)
  attentions = np.squeeze(np.concatenate(list(attentions.values()), axis=0))
  print('attentions: ', attentions.shape)

  # One projection per feature space; the classifier plot is drawn without
  # attention coloring, exactly as before.
  jobs = [
      (enc_model, attentions, '/home/nathan/Dropbox/projects/milk/encoder_tsne.png'),
      (deep_model, attentions, '/home/nathan/Dropbox/projects/milk/deep_tsne.png'),
      (cls_model, None, '/home/nathan/Dropbox/projects/milk/classifier_tsne.png'),
  ]
  for feat_model, att, dst in jobs:
    projected, ys = project_features(get_features(feat_model, data))
    draw_projection(projected, ys, att, dst=dst)
# Example #2
# 0
        # ENCAPSULATION: protect our data — don't let one class overwrite another's state
        # LOSING CONTROL: a setter can reject unintended input

    # We often need a getter and setter for all attributes
    def get_categories(self):
        # Plain accessor for the categories list. Python convention would
        # expose the attribute directly or via @property, but the interactive
        # menu loop below calls this name, so it stays as-is.
        return self.categories

    def set_categories(self, new_categories):
        # Mutator counterpart to get_categories. No validation is performed —
        # NOTE(review): consider checking new_categories is a list of Category.
        self.categories = new_categories


my_store = Store("Moe's Groceries", [
    Category('alcohol',
             [Item("Bourbon", 100), Item("Vodka", 50)]),
    Category('matches', [Item('Flint', 2)]),
    Category('cough syrup', [Milk('Cowgirl Creamery', 300, "Whole", 20200131)])
])

print(my_store)

# Interactive menu loop: keep prompting until the user enters 'q'.
# BUG FIX: the original compared strings with `is not`, which tests object
# *identity*, not equality — it only worked by accident of CPython string
# interning and raises a SyntaxWarning on modern Python. Use `!=` instead.
selection = ""
while selection != "q":
    selection = input(
        f"Please input a number between 1 and {len(my_store.categories)} or 'q' to quit: "
    )
    if selection != "q":
        print(f"User selected {selection}")
        # Guard against non-numeric or out-of-range input instead of crashing
        # with ValueError / IndexError.
        if selection.isdigit() and 1 <= int(selection) <= len(my_store.categories):
            print(my_store.get_categories()[int(selection) - 1])
        else:
            print("Invalid selection, please try again.")

## Task:
## Build an interactive store that, when opened,
## displays its name and all shopping categories.
# Example #3
# 0
from concrete_coffee import ConcreteCoffee
from milk import Milk
from sugar import Sugar
from vanilla import Vanilla

# Demonstrate the decorator pattern: a base coffee, then condiment decorators
# that each wrap the same base component.
myCoffee = ConcreteCoffee()

print("Ingredients : {}, Cost : {}, Sales Tax : {}".format(
    myCoffee.get_ingredients(), myCoffee.get_cost(), myCoffee.get_tax()))

myMilk = Milk(myCoffee)
# BUG FIX: "Ingradients" typo in the user-facing label corrected.
print("Ingredients : {}, Cost : {}, Sales Tax : {}".format(
    myMilk.get_ingredients(), myMilk.get_cost(), myMilk.get_tax()))

mySugar = Sugar(myCoffee)
print("Ingredients : {}, Cost : {}, Sales Tax : {}".format(
    mySugar.get_ingredients(), mySugar.get_cost(), mySugar.get_tax()))

myVanilla = Vanilla(myCoffee)
# BUG FIX: missing colon after "Cost" in the format string corrected.
# NOTE(review): get_ingradients() (sic) is left as-is — the Vanilla class may
# actually define the misspelled method name; confirm before renaming the call.
print("Ingredients : {}, Cost : {}, Sales Tax : {}".format(
    myVanilla.get_ingradients(), myVanilla.get_cost(), myVanilla.get_tax()))
# Example #4
# 0
def main(args):
  """Train and validate a Milk MIL model.

  1. Create generator datasets from the provided lists
  2. Train and validate Milk

  v0 - create datasets within this script
  v1 - factor monolithic training_utils.mil_train_loop !!
  tpu - replace data feeder and mil_train_loop with tf.keras.Model.fit()
  """
  # For the ensemble experiment, val and test lists may be passed in
  # explicitly; we only honor them when BOTH are given.
  if (args.test_list is not None) and (args.val_list is not None):
    train_list, val_list, test_list = load_lists(
      os.path.join(args.data_patt, '*.npy'),
      args.val_list, args.test_list)
  else:
    train_list, val_list, test_list = data_utils.list_data(
      os.path.join(args.data_patt, '*.npy'),
      val_pct=args.val_pct,
      test_pct=args.test_pct,
      seed=args.seed)

  if args.verbose:
    print("train_list:")
    print(train_list)
    print("val_list:")
    print(val_list)
    print("test_list:")
    print(test_list)

  ## Filter out unwanted samples:
  train_list = filter_list_by_label(train_list)
  val_list = filter_list_by_label(val_list)
  test_list = filter_list_by_label(test_list)

  ## Drop cases that cannot fill a bag of args.bag_size tiles.
  train_list = data_utils.enforce_minimum_size(train_list, args.bag_size, verbose=True)
  val_list = data_utils.enforce_minimum_size(val_list, args.bag_size, verbose=True)
  test_list = data_utils.enforce_minimum_size(test_list, args.bag_size, verbose=True)
  transform_fn = data_utils.make_transform_fn(args.x_size,
                                              args.y_size,
                                              args.crop_size,
                                              args.scale,
                                              normalize=True)

  # Keras Sequences assembling (bag, label) batches.
  # NOTE(review): the second positional argument (0.5 / 1.) is presumably a
  # per-epoch subsample fraction — confirm against data_utils.MILSequence.
  train_sequence = data_utils.MILSequence(train_list, 0.5, args.batch_size, args.bag_size, args.steps_per_epoch,
    case_label_fn, transform_fn, pad_first_dim=True)
  val_sequence = data_utils.MILSequence(val_list, 1., args.batch_size, args.bag_size, 100,
    case_label_fn, transform_fn, pad_first_dim=True)

  print('Model initializing')
  encoder_args = get_encoder_args(args.encoder)
  model = Milk(input_shape=(args.bag_size, args.crop_size, args.crop_size, 3),
               encoder_args=encoder_args, mode=args.mil, use_gate=args.gated_attention,
               temperature=args.temperature, freeze_encoder=args.freeze_encoder,
               deep_classifier=args.deep_classifier)

  if args.tpu:
    # TPU requires a native tensorflow optimizer, not a Keras one.
    optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
  else:
    optimizer = training_utils.AdamAccumulate(lr=args.learning_rate,
                               accum_iters=args.accumulate)

  # Timestamped output path; all artifacts of this run share exptime_str.
  exptime = datetime.datetime.now()
  exptime_str = exptime.strftime('%Y_%m_%d_%H_%M_%S')
  out_path = os.path.join(args.save_prefix, '{}.h5'.format(exptime_str))
  if not os.path.exists(os.path.dirname(out_path)):
    os.makedirs(os.path.dirname(out_path))

  # Record the val / test splits and session arguments so the experiment can
  # be reproduced from the timestamp alone.
  os.makedirs('./val_lists', exist_ok=True)
  val_list_file = os.path.join('./val_lists', '{}.txt'.format(exptime_str))
  with open(val_list_file, 'w+') as f:
    for v in val_list:
      f.write('{}\n'.format(v))

  os.makedirs('./test_lists', exist_ok=True)
  test_list_file = os.path.join('./test_lists', '{}.txt'.format(exptime_str))
  with open(test_list_file, 'w+') as f:
    for v in test_list:
      f.write('{}\n'.format(v))

  ## Write out arguments passed for this session
  os.makedirs('./args', exist_ok=True)
  arg_file = os.path.join('./args', '{}.txt'.format(exptime_str))
  with open(arg_file, 'w+') as f:
    for a in vars(args):
      f.write('{}\t{}\n'.format(a, getattr(args, a)))

  ## Transfer to TPU
  if args.tpu:
    print('Setting up model on TPU')
    # BUG FIX: the original only *printed* an error when no TPU runtime was
    # attached, then fell through and hit a NameError on the undefined
    # tpu_address. Fail fast with a clear message instead.
    if 'COLAB_TPU_ADDR' not in os.environ:
      raise RuntimeError('Not connected to a TPU runtime!')
    tpu_address = 'grpc://' + os.environ['COLAB_TPU_ADDR']
    print('TPU address is', tpu_address)
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=tpu_address)
    strategy = tf.contrib.tpu.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy)

  model.compile(optimizer=optimizer,
                loss=tf.keras.losses.categorical_crossentropy,
                metrics=['categorical_accuracy'])
  model.summary()

  ## Replace randomly initialized weights after model is compiled and on the correct device.
  if args.pretrained_model is not None and os.path.exists(args.pretrained_model):
    print('Replacing random weights with weights from {}'.format(args.pretrained_model))
    model.load_weights(args.pretrained_model, by_name=True)

  if args.early_stop:
    callbacks = [
        tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                         min_delta=0.00001,
                                         patience=5,
                                         verbose=1,
                                         mode='auto',)
    ]
  else:
    callbacks = []

  try:
    model.fit_generator(generator=train_sequence,
                        validation_data=val_sequence,
                        epochs=args.epochs,
                        workers=8,
                        use_multiprocessing=True,
                        callbacks=callbacks)
  except KeyboardInterrupt:
    print('Keyboard interrupt caught')
  except Exception as e:
    # Deliberately broad: any training failure still falls through to the
    # model save in `finally` below, so partial training is never lost.
    print('Other error caught')
    print(type(e))
    print(e)
  finally:
    model.save(out_path)
    print('Saved model: {}'.format(out_path))
    print('Training done. Find val and test datasets at')
    print(val_list_file)
    print(test_list_file)
# Example #5
# 0
def main(args):
    """Restore a trained Milk model, run every test case through it, and save
    attention / feature projection figures plus a case-wise class projection.
    """
    model = Milk()
    # Dummy forward pass so the subclassed model creates its variables
    # before the saver tries to restore them.
    x_dummy = tf.zeros(shape=[MIN_BAG, CROP_SIZE, CROP_SIZE, 3], 
                        dtype=tf.float32)
    retvals = model(x_dummy, verbose=True, return_embedding=True)
    for k, retval in enumerate(retvals):
        print('retval {}: {}'.format(k, retval.shape))

    # Eager-mode saver; fall back to the latest checkpoint in snapshot_dir
    # when no explicit snapshot path was given.
    saver = tfe.Saver(model.variables)
    if args.snapshot is None:
        snapshot = tf.train.latest_checkpoint(args.snapshot_dir)
    else:
        snapshot = args.snapshot
    print('Restoring from {}'.format(snapshot))

    saver.restore(snapshot)
    model.summary()

    test_list = read_test_list(args.test_list)

    yhats, ytrues = [], []
    features_case, features_classifier = [], []
    for test_case in test_list:
        # Per-case accumulators, refilled across args.repeats passes.
        _features, _attention = [], []
        _high_attention ,_high_images = [], []
        _low_attention, _low_images = [], []
        for _ in range(args.repeats):
            case_name = os.path.basename(test_case).replace('.npy', '')
            # NOTE(review): transform_fn and case_label_fn are not defined in
            # this function — presumably module-level globals; confirm.
            case_x, case_y = data_utils.load(
                data_path = test_case,
                transform_fn=transform_fn,
                # const_bag=100,
                all_tiles=True,
                case_label_fn=case_label_fn)
            retvals = model(tf.constant(case_x), training=False, return_embedding=True)

            # Unpack prediction, attention weights, instance features, and
            # the case-level / classifier-level embeddings.
            yhat, attention, features, feat_case, feat_class = retvals
            # Drop the leading size-1 axis (presumably the batch dim).
            attention = np.squeeze(attention.numpy(), axis=0)
            high_att_idx, high_att_imgs, low_att_idx, low_att_imgs = get_attention_extremes(
                attention, case_x, n = 15)

            yhats.append(yhat.numpy())
            ytrues.append(case_y)
            features_case.append(feat_case.numpy())        
            features_classifier.append(feat_class.numpy())

            _features.append(features.numpy())
            _attention.append(attention)
            _high_attention.append(high_att_idx)
            _high_images.append(high_att_imgs)
            _low_attention.append(low_att_idx)
            _low_images.append(low_att_imgs)
            print('Case {}: label={} predicted={}'.format(
                test_case, np.argmax(case_y,axis=-1), np.argmax(yhat, axis=-1)))

        # Pool all repeats for this case into single arrays.
        features = np.concatenate(_features, axis=0)
        attention = np.concatenate(_attention, axis=0)
        high_attention = np.concatenate(_high_attention, axis=0)
        high_attention_images = np.concatenate(_high_images, axis=0)
        low_attention = np.concatenate(_low_attention, axis=0)
        low_attention_images = np.concatenate(_low_images, axis=0)

        # Feature projection colored by attention ...
        savepath = '{}_{}.png'.format(args.savebase, case_name)
        print('Saving figure {}'.format(savepath))
        z = draw_projection(features, attention, savepath=savepath)

        # ... and the same projection annotated with extreme-attention images.
        savepath = '{}_{}_imgs.png'.format(args.savebase, case_name)
        print('Saving figure {}'.format(savepath))
        draw_projection_with_images(z, attention, 
            high_attention, high_attention_images, 
            low_attention, low_attention_images, 
            savepath=savepath)

    # Aggregate over all cases for the final class-wise projection.
    yhats = np.concatenate(yhats, axis=0)
    ytrues = np.concatenate(ytrues, axis=0)
    features_case = np.concatenate(features_case, axis=0) 
    features_classifier = np.concatenate(features_classifier, axis=0)

    tlbase = os.path.splitext(os.path.basename(args.test_list))[0]
    savepath = '{}_{}.png'.format(args.savebase, tlbase)
    print('Saving case-wise projection: {}'.format(savepath))
    draw_class_projection(features_case, ytrues, yhats, savepath=savepath)
# Example #6
# 0
import sys

from bread import Bread
from chocolate import Chocolate
from exit import Exit
from milk import Milk

if __name__ == "__main__":
    # Shop inventory; "Exit" acts as a sentinel menu entry.
    itemList = [Milk(), Chocolate(), Bread(), Exit()]

    print("Item available")
    # IDIOM FIX: enumerate(..., start=1) replaces the manually incremented
    # counter; printed output is identical ("1.Milk", "2.Chocolate", ...).
    for counter, element in enumerate(itemList, start=1):
        print(str(counter) + "." + element.get_name())

    while True:
        # NOTE(review): this loop has no visible `break`, and no branch for
        # item values other than 4 (adding quantities) — the snippet appears
        # truncated here; as shown, the loop never terminates.
        item = int(input("Enter the items you want"))
        if item == 4:
            print("thankyou for shopping with us")
            print("items in the basket:")
            # List each purchased item, skipping the "Exit" sentinel entry.
            for element in itemList:
                if element.get_name() != "Exit" and element.get_quantity() > 0:
                    print(element.get_name(), element.get_quantity())

            print("Total bill:")
            billAmount = 0
            # Sum quantity * price over all non-Exit items.
            for element in itemList:
                if element.get_name() != "Exit":
                    billAmount = billAmount + element.get_quantity(
                    ) * element.get_price()
# Example #7
# 0
import sys

sys.path.insert(0, '../..')
from milk import Milk
import tensorflow as tf

# TF 1.x: eager mode must be enabled before any ops or models are created.
tf.enable_eager_execution()

model = Milk()
# One dummy forward pass through the subclassed model — presumably so its
# variables are built and summary() can report layer shapes; confirm.
dummy_data = tf.zeros((20, 128, 128, 3), dtype=tf.float32)
dummy_y = model(dummy_data, verbose=True)
model.summary()