Example #1
    def __init__(self, backend):

        self.train_data = dataset.train("/tmp/mnist_data")
        self.test_data = dataset.test("/tmp/mnist_data")
        self.backend = backend

        self.train_images = np.reshape(
            self.train_data['images'][:TRAIN_SIZE],
            (-1, IMAGE_SIZE, IMAGE_SIZE, IMAGE_DEPTH))
        self.train_labels = self.train_data['labels'][:TRAIN_SIZE]

        self.test_images = np.reshape(
            self.test_data['images'][:TRAIN_SIZE],
            (-1, IMAGE_SIZE, IMAGE_SIZE, IMAGE_DEPTH))
        self.test_labels = self.test_data['labels'][:TRAIN_SIZE]

        if backend == 'gpu':
            self.device = "/device:GPU:0"
        else:
            assert backend == 'cpu', 'Invalid backend specified: %s' % backend
            self.device = "/cpu:0"

        print("Creating model")

        self.model = MNIST.create_model()
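The constructor above relies on module-level constants and a local dataset helper; a minimal sketch of what it assumes (the concrete values are illustrative, not taken from the original project):

import numpy as np

import dataset  # local helper exposing train()/test()

TRAIN_SIZE = 10000   # number of examples to slice off (illustrative)
IMAGE_SIZE = 28      # MNIST images are 28x28 pixels
IMAGE_DEPTH = 1      # single grayscale channel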
Example #2
def main(disable=0, device="cpu", cycles=100, D=32, N=128, name="evo"):

    disable = int(disable)
    cycles = int(cycles)

    print("Using device: %s" % device)

    N = int(N)
    D = int(D)

    data = dataset.create(N, D)
    test = dataset.test(N, D)

    if name == "evo":
        net = model.EvolutionaryModel(D, disable=disable).to(device)

        try:
            for i in range(cycles):
                net.do_cycle(*data, *test)
        except KeyboardInterrupt:
            pass

        best = net.select_best()
        print(best.net[0].weight.data)

        train.visualize(net.select_best(), outf="results.png", D=D)

    else:
        net = model.Model(D)
        train.train(*data, *test, net)
        print(net.net[0].weight)
Example #3
def eval_input_fn(params):
    batch_size = params["batch_size"]
    data_dir = params["data_dir"]
    ds = dataset.test(data_dir).apply(
        tf.contrib.data.batch_and_drop_remainder(batch_size))
    images, labels = ds.make_one_shot_iterator().get_next()
    return images, labels
def main(unused):

    if FLAGS.run_gpu:
        backend = "/device:GPU:0"
    else:
        backend = "/cpu:0"

    mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                              model_dir=FLAGS.model_dir,
                                              params={
                                                  'backend': backend,
                                              })

    if FLAGS.mode == 'train' or FLAGS.mode == 'both':
        ds = dataset.train(FLAGS.data_dir)

        # Train the model
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": ds['images']},
            y=ds['labels'],
            batch_size=FLAGS.batch_size,
            num_epochs=FLAGS.train_epochs,
            shuffle=True)

        mnist_classifier.train(input_fn=train_input_fn, steps=200)

    if FLAGS.mode == 'predict' or FLAGS.mode == 'both':
        ds = dataset.test(FLAGS.data_dir)

        # Predict test set
        pred_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": ds['images']}, shuffle=False)

        mnist_classifier.predict(input_fn=pred_input_fn)
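A hedged sketch of an 'eval' branch that would reuse the eval_input_fn defined above; it assumes FLAGS.batch_size and FLAGS.data_dir exist, as the rest of main() suggests:

    if FLAGS.mode == 'eval' or FLAGS.mode == 'both':
        # eval_input_fn expects a params dict with batch_size and data_dir.
        eval_results = mnist_classifier.evaluate(
            input_fn=lambda: eval_input_fn({'batch_size': FLAGS.batch_size,
                                            'data_dir': FLAGS.data_dir}))
        print(eval_results)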
Example #5
def eval_input_fn(params):
  batch_size = params["batch_size"]
  data_dir = params["data_dir"]
  ds = dataset.test(data_dir).apply(
      tf.contrib.data.batch_and_drop_remainder(batch_size))
  images, labels = ds.make_one_shot_iterator().get_next()
  return images, labels
 async def send_to_net(self, image, loop=None):
     dataset = ModelDataset(image, self.augmentation)
     model = ResnetGenerator()
     model.load_state_dict(
         torch.load('/opt/app/model_gen_BtoA2.pth',
                    map_location=torch.device('cpu')))
     image1 = await loop.create_task(test(dataset, model, loop))
     image1 = Image.fromarray((image1 * 255).astype('uint8'))
     image1.save(self.imgByteArr, 'JPEG')
     im = self.imgByteArr.getvalue()
     return im
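A hedged sketch of driving this coroutine from synchronous code; handler and raw_image are placeholders for the surrounding service object and the incoming image:

import asyncio

loop = asyncio.get_event_loop()
# send_to_net schedules test(dataset, model, loop) on the loop it receives.
jpeg_bytes = loop.run_until_complete(handler.send_to_net(raw_image, loop=loop))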
Example #7
def train():
    questions = list(questions_from_dataset(dataset.train()))
    random.shuffle(questions)
    test_questions = list(questions_from_dataset(dataset.test()))
    random.shuffle(test_questions)

    i = 0
    for i, batch in enumerate(iterate_batches(questions, size=20)):
        n.train(batch)
        if i % 10 == 0:
            n.save(i)
Example #8
def run_mnist_eager():
    """Run MNIST training and eval loop in eager mode.
    """

    data_dir = '/tmp/tensorflow/mnist/input_data' + str(ddl.rank())
    model_dir = '/tmp/tensorflow/mnist/checkpoints/' + str(ddl.rank()) + '/'

    # Delete model dir
    if os.path.isdir(model_dir) and ddl.local_rank() == 0:
        shutil.rmtree(model_dir)

    data_format = 'channels_first'

    # Load the datasets
    train_ds, _ = mnist_dataset.train(data_dir, (1, 28, 28), label_int=True)
    train_ds = train_ds.shard(ddl.size(),
                              ddl.rank()).shuffle(60000).batch(batch_size)
    test_ds, _ = mnist_dataset.test(data_dir, (1, 28, 28), label_int=True)
    test_ds = test_ds.batch(batch_size)

    # Create the model and optimizer
    model = create_model(data_format)
    optimizer = tf.train.MomentumOptimizer(0.01, 0.5)

    train_dir = None
    test_dir = None
    summary_writer = tf.contrib.summary.create_file_writer(train_dir,
                                                           flush_millis=10000)
    test_summary_writer = tf.contrib.summary.create_file_writer(
        test_dir, flush_millis=10000, name='test')

    # Create and restore checkpoint (if one exists on the path)
    checkpoint_prefix = os.path.join(model_dir, 'ckpt-r' + str(ddl.rank()))
    step_counter = tf.train.get_or_create_global_step()
    checkpoint = tf.train.Checkpoint(model=model,
                                     optimizer=optimizer,
                                     step_counter=step_counter)
    # Restore variables on creation if a checkpoint exists.
    checkpoint.restore(tf.train.latest_checkpoint(model_dir))

    # Train and evaluate for a set number of epochs.
    for _ in range(train_epochs):
        start = time.time()
        with summary_writer.as_default():
            train(model, optimizer, train_ds, step_counter, 10)
        end = time.time()
        if ddl.rank() == 0:
            print('\nTrain time for epoch #%d (%d total steps): %f' %
                  (checkpoint.save_counter.numpy() + 1, step_counter.numpy(),
                   end - start))
        with test_summary_writer.as_default():
            test(model, test_ds)
        checkpoint.save(checkpoint_prefix)
Example #9
def main(_):
    tfe.enable_eager_execution()

    (device, data_format) = ('/gpu:0', 'channels_first')
    if FLAGS.no_gpu or tfe.num_gpus() <= 0:
        (device, data_format) = ('/cpu:0', 'channels_last')
    print('Using device %s, and data format %s.' % (device, data_format))

    # Load the datasets
    train_ds = dataset.train(FLAGS.data_dir).shuffle(60000).batch(
        FLAGS.batch_size)
    test_ds = dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size)

    # Create the model and optimizer
    model = mnist.Model(data_format)
    optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum)

    if FLAGS.output_dir:
        # Create directories to which summaries will be written
        # tensorboard --logdir=<output_dir>
        # can then be used to see the recorded summaries.
        train_dir = os.path.join(FLAGS.output_dir, 'train')
        test_dir = os.path.join(FLAGS.output_dir, 'eval')
        tf.gfile.MakeDirs(FLAGS.output_dir)
    else:
        train_dir = None
        test_dir = None
    summary_writer = tf.contrib.summary.create_file_writer(train_dir,
                                                           flush_millis=10000)
    test_summary_writer = tf.contrib.summary.create_file_writer(
        test_dir, flush_millis=10000, name='test')
    checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
    step_counter = tf.train.get_or_create_global_step()
    checkpoint = tfe.Checkpoint(model=model,
                                optimizer=optimizer,
                                step_counter=step_counter)
    # Restore variables on creation if a checkpoint exists.
    checkpoint.restore(tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
    # Train and evaluate for 10 epochs.
    with tf.device(device):
        for _ in range(10):
            start = time.time()
            with summary_writer.as_default():
                train(model, optimizer, train_ds, step_counter,
                      FLAGS.log_interval)
            end = time.time()
            print('\nTrain time for epoch #%d (%d total steps): %f' %
                  (checkpoint.save_counter.numpy() + 1, step_counter.numpy(),
                   end - start))
            with test_summary_writer.as_default():
                test(model, test_ds)
            checkpoint.save(checkpoint_prefix)
Example #10
def main(_):
  tfe.enable_eager_execution()

  (device, data_format) = ('/gpu:0', 'channels_first')
  if FLAGS.no_gpu or tfe.num_gpus() <= 0:
    (device, data_format) = ('/cpu:0', 'channels_last')
  print('Using device %s, and data format %s.' % (device, data_format))

  # Load the datasets
  train_ds = dataset.train(FLAGS.data_dir).shuffle(60000).batch(
      FLAGS.batch_size)
  test_ds = dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size)

  # Create the model and optimizer
  model = mnist.Model(data_format)
  optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum)

  if FLAGS.output_dir:
    # Create directories to which summaries will be written
    # tensorboard --logdir=<output_dir>
    # can then be used to see the recorded summaries.
    train_dir = os.path.join(FLAGS.output_dir, 'train')
    test_dir = os.path.join(FLAGS.output_dir, 'eval')
    tf.gfile.MakeDirs(FLAGS.output_dir)
  else:
    train_dir = None
    test_dir = None
  summary_writer = tf.contrib.summary.create_file_writer(
      train_dir, flush_millis=10000)
  test_summary_writer = tf.contrib.summary.create_file_writer(
      test_dir, flush_millis=10000, name='test')
  checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
  step_counter = tf.train.get_or_create_global_step()
  checkpoint = tfe.Checkpoint(
      model=model, optimizer=optimizer, step_counter=step_counter)
  # Restore variables on creation if a checkpoint exists.
  checkpoint.restore(tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
  # Train and evaluate for 10 epochs.
  with tf.device(device):
    for _ in range(10):
      start = time.time()
      with summary_writer.as_default():
        train(model, optimizer, train_ds, step_counter, FLAGS.log_interval)
      end = time.time()
      print('\nTrain time for epoch #%d (%d total steps): %f' %
            (checkpoint.save_counter.numpy() + 1,
             step_counter.numpy(),
             end - start))
      with test_summary_writer.as_default():
        test(model, test_ds)
      checkpoint.save(checkpoint_prefix)
Example #11
    def run_create_concrete():
        path = "./configs/pairs/"
        config_files = [
            join(path, f) for f in listdir(path) if isfile(join(path, f))
            if ".py" not in f and "generic" not in f and "__" not in f
        ]

        for config_file in config_files:
            print("MMMMMMMMMMMMMMMMMMMMMMMMMM", config_file)
            _config = process_config(config_file, create_folders=False)

            data_root = join(_config.data_root, "valid")

            ds = dataset.Dataset(data_root,
                                 specific_folders=_config.specific_classes)
            classes = ds.classes

            ## Find checkpoint
            print(_config.exp_name)
            checkpoint_file = glob(
                f"./experiments/{_config.exp_name}*/checkpoints/killed*.pth"
            )[0]
            self.load_checkpoint(checkpoint_file)
            print("CHECKPOINT!", checkpoint_file)

            input_channels = int(_config.input_channels)

            loader, test_ds = dataset.test(
                "./input/quickdraw/test_simplified.csv",
                200,
                _config.image_size,
                num_workers=8,
                input_channels=input_channels)

            self.validate_concrete("test", classes, loader, ds)

            ## valid
            ds = self.valid_dataset
            loader = self.valid_data_loader

            self.validate_concrete("valid", classes, loader, ds)
Example #12
def openfile():
    filepath = filedialog.askopenfilename(initialdir="test",
                                          title="Test",
                                          filetypes=(("png files", "*.png"),
                                                     ("png files", "*.png")))
    test_image = test(filepath, 1024)
    imgt = ImageTk.PhotoImage(Image.open(filepath))
    panel.config(image=imgt)
    panel.image = imgt

    number_of_answer = ""
    max_weight = 0
    print("Нейросеть думает...\n")
    test_data = network.query(test_image)
    for g in range(len(test_data)):
        print(ALPHABET.get(g) + ") ", test_data[g])
        if max_weight < test_data[g]:
            max_weight = test_data[g]
            number_of_answer = g
    answer = ALPHABET.get(number_of_answer)

    label2.config(text=answer)
    print("\nНейросеть думает, что на картинке: ", answer, '\n')
Example #13
import torch
import test_model
import dataset
import os
import loss
import tqdm
import cv2
import predict as pt

model = test_model.YoloModel().cuda()
model = torch.nn.DataParallel(model).cuda()

print('preparing data...')
train_data = dataset.train()
test_data = dataset.test()
print('done')
MODEL_PATH = 'yolo1.h5'
if os.path.exists(MODEL_PATH):
    print('loading saved state...')
    model.load_state_dict(torch.load(MODEL_PATH))
    print('loading done')
print('start training...')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=5e-4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                               step_size=1,
                                               gamma=0.95)
loss_fun = loss.yololoss()
bestloss = 1e10
e = 0
# for e in range(1200):
while True:
 def eval_input_fn():
     ds = dataset.test(data_path)
     ds = ds.batch(params['batch_size'])
     return ds
Example #15
 def eval_input_fn():
     return dataset.test(DATA_DIR).batch(
         FLAGS.batch_size).make_one_shot_iterator().get_next()
Example #16
 def eval_input_fn():
   return tf.compat.v1.data.make_one_shot_iterator(
       dataset.test(DATA_DIR).batch(FLAGS.batch_size)).get_next()
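Examples #15 and #16 express the same pattern; #16 uses the tf.compat.v1 spelling that survives after TensorFlow 2.x removed Dataset.make_one_shot_iterator(). An Estimator input_fn may also simply return the batched dataset and let the framework build the iterator, as in this hedged sketch (same DATA_DIR and FLAGS assumed):

def eval_input_fn():
    # Returning the tf.data.Dataset directly is enough for tf.estimator.
    return dataset.test(DATA_DIR).batch(FLAGS.batch_size)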
Example #17
congress = dataset.fromFile('congress', 0)
r = congress.tree()

if __name__ == '__main__':

    print(r)
    res, pre, rec, har = r.stats()
    print("\nresults: " + str(res))
    print("precision: " + str(pre))
    print("recall: " + str(rec))
    print("harmonic mean " + str(har))
    
    print("\nPRUNED:\n")
    r.prune(0.33)
    
    print(r)
    res, pre, rec, har = r.stats()
    print("\nresults: " + str(res))
    print("precision: " + str(pre))
    print("recall: " + str(rec))
    print("harmonic mean " + str(har))

    print("\nTESTING:\n")
    r, stats = dataset.test(congress, 50)
    
    print(r)
    res, pre, rec, har = stats
    print("\nresults: " + str(res))
    print("precision: " + str(pre))
    print("recall: " + str(rec))
    print("harmonic mean " + str(har))
Example #18
def eval_full_data(params):
    batch_size = params['batch_size']
    data_dir = params['data_dir']
    data = dataset.test(data_dir)
    data = data.batch(batch_size, drop_remainder=True)
    return data
Example #19
def main(params):
    # basic parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path',
                        type=str,
                        default=None,
                        required=True,
                        help='The path to the pretrained weights of model')
    parser.add_argument(
        '--crop_height',
        type=int,
        default=640,
        help='Height of cropped/resized input image to network')
    parser.add_argument('--crop_width',
                        type=int,
                        default=640,
                        help='Width of cropped/resized input image to network')
    parser.add_argument('--data',
                        type=str,
                        default='/path/to/data',
                        help='Path of training data')
    parser.add_argument('--batch_size',
                        type=int,
                        default=1,
                        help='Number of images in each batch')
    parser.add_argument('--context_path',
                        type=str,
                        default="resnet101",
                        help='The context path model you are using.')
    parser.add_argument('--cuda',
                        type=str,
                        default='0',
                        help='GPU ids used for training')
    parser.add_argument('--use_gpu',
                        type=bool,
                        default=True,
                        help='Whether to user gpu for training')
    parser.add_argument('--num_classes',
                        type=int,
                        default=2,
                        help='num of object classes (with void)')
    args = parser.parse_args(params)

    # create dataset and dataloader

    dataloader = DataLoader(test(input_transform, target_transform),
                            num_workers=1,
                            batch_size=1,
                            shuffle=True)
    # build model
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
    model = DANet(nclass=2, backbone='resnet50', aux=False, se_loss=False)
    model = model.cuda()

    # load pretrained model if exists
    print('load model from %s ...' % args.checkpoint_path)
    model.load_state_dict(torch.load(args.checkpoint_path))
    print('Done!')

    # test
    eval(model, dataloader, args)
Example #20
def run_mnist_eager(flags_obj):
    """
    Run MNIST training and eval loop in eager mode.

    Args:
      flags_obj: An object containing parsed flag values.
    """

    # Soft placement
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    tfe.enable_eager_execution(config=config)

    model_helpers.apply_clean(flags.FLAGS)

    # Automatically determine device and data_format
    (device, data_format) = ('/gpu:0', 'channels_first')
    if flags_obj.no_gpu or not tf.test.is_gpu_available():
        (device, data_format) = ('/cpu:0', 'channels_last')

    # If data_format is defined in FLAGS, overwrite automatically set value.
    if flags_obj.data_format is not None:
        data_format = flags_obj.data_format

    print('Using device %s, and data format %s.' % (device, data_format))

    # Load the datasets
    train_ds = mnist_dataset.train(flags_obj.data_dir).shuffle(60000).batch(
        flags_obj.batch_size)
    test_ds = mnist_dataset.test(flags_obj.data_dir).batch(
        flags_obj.batch_size)

    # Create the model and optimizer
    model = model_lib.create_model(data_format)
    optimizer = tf.train.MomentumOptimizer(flags_obj.lr, flags_obj.momentum)

    # Print model summary
    print(model.summary())

    # Create file writers for writing TensorBoard summaries.
    if flags_obj.output_dir:
        # Create directories to which summaries will be written
        # tensorboard --logdir=<output_dir>
        # can then be used to see the recorded summaries.
        train_dir = os.path.join(flags_obj.output_dir, 'train')
        test_dir = os.path.join(flags_obj.output_dir, 'eval')
        tf.gfile.MakeDirs(flags_obj.output_dir)
    else:
        train_dir = None
        test_dir = None

    summary_writer = tf.contrib.summary.create_file_writer(train_dir,
                                                           flush_millis=10000)
    test_summary_writer = tf.contrib.summary.create_file_writer(
        test_dir, flush_millis=10000, name='test')

    # Create and restore checkpoint (if one exists on the path)
    checkpoint_prefix = os.path.join(flags_obj.model_dir, 'ckpt')
    step_counter = tf.train.get_or_create_global_step()
    checkpoint = tf.train.Checkpoint(model=model,
                                     optimizer=optimizer,
                                     step_counter=step_counter)
    # Restore variables on creation if a checkpoint exists.
    checkpoint.restore(tf.train.latest_checkpoint(flags_obj.model_dir))

    # Train and evaluate for a set number of epochs.
    with tf.device(device):
        for _ in range(flags_obj.train_epochs):
            start = time.time()
            with summary_writer.as_default():
                train(model, optimizer, train_ds, step_counter,
                      flags_obj.log_interval)
            end = time.time()

            # Note time taken
            print('\nTrain time for epoch #%d (%d total steps): %f' %
                  (checkpoint.save_counter.numpy() + 1, step_counter.numpy(),
                   end - start))
            with test_summary_writer.as_default():
                test(model, test_ds)
            checkpoint.save(checkpoint_prefix)
#from sklearn.model_selection import train_test_split
import numpy as np

import dataset
import os
from keras import backend as K  # assumed import; K.image_data_format() is used below

batch_size = 64
num_classes = 62
epochs = 10
img_rows, img_cols = 28, 28

print('Start loading data.')
# To be modified
folder_path = os.getcwd()
train_dataset = dataset.train(os.path.join(folder_path, 'emnist'))
test_dataset = dataset.test(os.path.join(folder_path, 'emnist'))
print('Data has been loaded.')

# Not sure whether the reshapes of the x and y train/test tensors are actually needed
if K.image_data_format() == 'channels_first':
    #x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    #x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    #x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    #x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
"""print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')"""
Example #22
 def eval_input_fn():
     return dataset.test('%s/validation' % FLAGS.data_dir).batch(
         FLAGS.batch_size).make_one_shot_iterator().get_next()
Example #23
def eval_input_fn(params):
    batch_size = params["batch_size"]
    data_dir = params["data_dir"]
    ds = dataset.test(data_dir).batch(batch_size, drop_remainder=True)
    return ds
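The params-based signature here follows the TPUEstimator convention, where batch_size is injected into params automatically; a plain tf.estimator.Estimator also forwards its params dict to an input_fn that declares a params argument. A hedged sketch of wiring this up (my_model_fn and the values are placeholders):

est = tf.estimator.Estimator(
    model_fn=my_model_fn,
    params={"batch_size": 128, "data_dir": "/tmp/mnist_data"})
metrics = est.evaluate(input_fn=eval_input_fn)
print(metrics)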
Example #24
 def eval_input_fn():
   return dataset.test(FLAGS.data_dir).batch(
       FLAGS.batch_size).make_one_shot_iterator().get_next()
 def eval_input_fn():
   ds = dataset.test(data_dir)
   ds = ds.batch(batch_size)
   return ds
Example #26
import tensorflow as tf
import dataset

sess = tf.InteractiveSession()

mnist_train = dataset.train("./mnist_data")
mnist_test = dataset.test("./mnist_data")

# https://www.tensorflow.org/guide/datasets
print(mnist_train.output_shapes, mnist_train.output_types)
print(mnist_test.output_shapes, mnist_test.output_types)
batched_train = mnist_train.batch(100)

iterator = batched_train.make_one_shot_iterator()
next_element = iterator.get_next()

x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# y is a matrix
y = tf.nn.softmax(tf.matmul(x, W) + b)

# TODO: try [None, 10]
y_ = tf.placeholder(tf.int32, [None])
# TODO: why does this not work?
# y_ = tf.placeholder(tf.int32, [None, 1])
# y_ = tf.placeholder(tf.float32, [None, 10])

# because y is a 2D matrix, note the meaning of reduce_sum
one_hot_y = tf.one_hot(y_, 10)
def eval_input_fn(data_dir, batch_size=100):
  features = dataset.test(data_dir).batch(
      batch_size=batch_size).make_one_shot_iterator().get_next()
  return {'pixels': features[0]}, features[1]
Example #28
    def test(self):
        self.model.eval()

        test_data_loader, test_dataset = dataset.test(
            self.config.testcsv,
            self.config.batch_size,
            self.config.image_size,
            num_workers=8,
            input_channels=self.input_channels)

        self.load_checkpoint(self.config.checkpoint)

        cls_to_idx = {
            cls: idx
            for idx, cls in enumerate(self.train_dataset.classes)
        }
        print(cls_to_idx)
        idx_to_cls = {cls_to_idx[c]: c for c in cls_to_idx}

        def row2string(r):
            v = [r[-1], r[-2], r[-3]]
            v = [v.item() for v in v]
            v = map(lambda v: v if v < 340 else 0, v)

            v = [idx_to_cls[v].replace(' ', '_') for v in v]

            return ' '.join(v)

        labels = []
        key_ids = []
        outputs = []

        with torch.no_grad():
            for idx, (data, target) in enumerate(test_data_loader):
                data = data.to(self.device)
                output = self.model(data)

                n = output.detach().cpu().numpy()
                outputs.append(n)

                order = np.argsort(n, 1)[:, -3:]
                #                order = refine(order, "test", idx * self.config.batch_size)

                predicted_y = [row2string(o) for o in order]
                labels = labels + predicted_y
                key_ids = key_ids + target.numpy().tolist()

                if idx % 10 == 0:
                    print(f"{idx} of  {len(test_data_loader)}")

        import pickle

        with open('labels', 'wb') as fp:
            pickle.dump(key_ids, fp)
        with open('idx_to_cls', 'wb') as fp:
            pickle.dump(idx_to_cls, fp)

        _all = np.concatenate(outputs)
        np.save("output.npy", _all)

        d = {'key_id': key_ids, 'word': labels}
        df = pd.DataFrame.from_dict(d)
        df.to_csv('submission.csv', index=False)
Example #29
def main(argv):
    parser = MNISTEagerArgParser()
    flags = parser.parse_args(args=argv[1:])

    # TF v1.7
    tfe.enable_eager_execution()

    # Automatically determine device and data_format
    (device, data_format) = ('/gpu:0', 'channels_first')
    if flags.no_gpu or tfe.num_gpus() <= 0:
        (device, data_format) = ('/cpu:0', 'channels_last')
    # If data_format is defined in FLAGS, overwrite automatically set value.
    if flags.data_format is not None:
        data_format = flags.data_format

    # Log Info
    print("-" * 64)
    print("TEST INFO - EAGER")
    print("-" * 64)
    print("TF version:\t {}".format(tf.__version__))
    print("Eager execution:\t {}".format(tf.executing_eagerly()))
    print("Dataset:\t MNIST")
    print("Model:\t CNN")
    print('Device:\t {}'.format(device))

    if data_format == 'channels_first':
        print("Data format:\t NCHW (channel first)")
    else:
        print("Data format:\t NHWC (channel last)")

    print("=" * 64)

    # Load the datasets
    train_ds = mnist_dataset.train(flags.data_dir).shuffle(60000).batch(
        flags.batch_size)
    test_ds = mnist_dataset.test(flags.data_dir).batch(flags.batch_size)

    # Create the model and optimizer
    # model = create_model(data_format)
    model = MNISTModel(data_format)
    optimizer = tf.train.MomentumOptimizer(flags.lr, flags.momentum)

    # Create file writers for writing TensorBoard summaries.
    if flags.output_dir:
        # Create directories to which summaries will be written
        # tensorboard --logdir=<output_dir>
        # can then be used to see the recorded summaries.
        train_dir = os.path.join(flags.output_dir, 'train')
        test_dir = os.path.join(flags.output_dir, 'eval')
        tf.gfile.MakeDirs(flags.output_dir)
    else:
        train_dir = None
        test_dir = None
    summary_writer = tf.contrib.summary.create_file_writer(train_dir,
                                                           flush_millis=10000)
    test_summary_writer = tf.contrib.summary.create_file_writer(
        test_dir, flush_millis=10000, name='test')

    # Create and restore checkpoint (if one exists on the path)
    checkpoint_prefix = os.path.join(flags.model_dir, 'ckpt')
    step_counter = tf.train.get_or_create_global_step()
    checkpoint = tfe.Checkpoint(model=model,
                                optimizer=optimizer,
                                step_counter=step_counter)
    # Restore variables on creation if a checkpoint exists.
    checkpoint.restore(tf.train.latest_checkpoint(flags.model_dir))

    # Train and evaluate for a set number of epochs.
    with tf.device(device):
        for _ in range(flags.train_epochs):
            start = time.time()
            with summary_writer.as_default():
                train(model, optimizer, train_ds, step_counter,
                      flags.log_interval)
            end = time.time()
            print('\nTrain time for epoch #%d (%d total steps): %f' %
                  (checkpoint.save_counter.numpy() + 1, step_counter.numpy(),
                   end - start))
            with test_summary_writer.as_default():
                test(model, test_ds)
            checkpoint.save(checkpoint_prefix)
Example #30
def eval_data():
    data = dataset.test(FLAGS.data_dir)
    data = data.cache()
    data = data.batch(FLAGS.batch_size)
    return data
Example #31
def show_heatmap():
    test_questions = list(questions_from_dataset(dataset.test()))
    random.shuffle(test_questions)
    para, question = test_questions[0]
    show_html.show(generate_heatmap(n, para, question).encode('utf-8'))
Example #32
def mnist(learning_rate, initializer_mode, num_conv_layers, num_fc_layers):
    if num_conv_layers not in [1, 2]:
        raise ValueError("num_conv_layers should be 1 or 2")
    if num_fc_layers not in [1, 2]:
        raise ValueError("num_fc_layers should be 1 or 2")

    def make_hyperparameter_string(learning_rate, initializer_mode,
                                   num_conv_layers, num_fc_layers):
        hyperparameter = "lr_%.e_" % learning_rate
        if initializer_mode == 0:
            hyperparameter += "xavier_constant"
        else:
            hyperparameter += "truncated_normal_constant"
        hyperparameter += "_%d_conv_%d_fc" % (num_conv_layers, num_fc_layers)
        return hyperparameter

    learning_rate = learning_rate
    if initializer_mode == 0:
        weights_initializer = tf.contrib.layers.xavier_initializer()
        biases_initializer = tf.constant_initializer(0.1)
    else:
        weights_initializer = tf.truncated_normal_initializer(stddev=0.1)
        biases_initializer = tf.constant_initializer(0.1)
    logdir = "logs/mnist/" + make_hyperparameter_string(
        learning_rate, initializer_mode, num_conv_layers, num_fc_layers)

    if not tf.gfile.Exists(logdir):
        tf.gfile.MakeDirs(logdir)

    def mnist_net(x):
        endpoints = {}
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_initializer=weights_initializer,
                            biases_initializer=biases_initializer):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                                stride=1,
                                padding="SAME"):
                net = slim.conv2d(x, 32, [5, 5], scope="conv1")
                net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool1")
                endpoints["block1"] = net
                if num_conv_layers == 2:
                    net = slim.conv2d(net, 64, [5, 5], scope="conv2")
                    net = slim.max_pool2d(net, [2, 2], stride=2, scope="pool2")
                    endpoints["block2"] = net
                    net = tf.reshape(net, shape=[-1, 7 * 7 * 64])
                elif num_conv_layers == 1:
                    net = tf.reshape(net, shape=[-1, 14 * 14 * 32])
                if num_fc_layers == 1:
                    logits = slim.fully_connected(net, 10, scope="fc")
                else:
                    logits = slim.stack(net,
                                        slim.fully_connected, [1024, 10],
                                        scope="fc")
                endpoints["logits"] = logits
        return logits, endpoints

    # ValueError: Variable conv1/weights already exists, disallowed.
    # Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:
    graph = tf.Graph()
    with graph.as_default():
        with tf.name_scope("input"):
            images = tf.placeholder(tf.float32,
                                    shape=[None, 784],
                                    name="images")
            images_3d = tf.reshape(images,
                                   shape=[-1, 28, 28, 1],
                                   name="images_3d")
            labels = tf.placeholder(tf.uint8, shape=[None], name="labels")
            onehot_labels = tf.one_hot(indices=labels,
                                       depth=10,
                                       name="onehot_labels")

        logits, endpoints = mnist_net(images_3d)

        with tf.name_scope("loss"):
            # loss = slim.losses.softmax_cross_entropy(logits, onehot_labels)
            loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                                   logits=logits)
            # Keep this line commented out, otherwise softmax_cross_entropy_loss
            # shows up twice in TensorBoard; the line above apparently adds it already.
            # tf.losses.add_loss(loss) # Letting TF-Slim know about the additional loss.
            total_loss = tf.losses.get_total_loss(
                add_regularization_losses=False)
            # tf.add_to_collection('EXTRA_LOSSES', total_loss)

        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(
                labels, tf.cast(tf.argmax(logits, axis=1), tf.uint8))
            accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))

        with tf.name_scope("optimize"):
            optimizer = tf.train.AdamOptimizer(learning_rate)
            # create_train_op ensures that each time we ask for the loss, the update_ops
            # are run and the gradients being computed are applied too.
            train_op = slim.learning.create_train_op(total_loss, optimizer)

        # A batch size of 100 works much better, and much more stably, than 30.
        train_set = dataset.train("MNIST-data").cache().shuffle(
            buffer_size=1000).batch(100).repeat(num_epoch)
        test_set = dataset.test("MNIST-data").cache().batch(30).repeat()

        iterator = train_set.make_one_shot_iterator()
        one_element = iterator.get_next()
        iterator_test = test_set.make_one_shot_iterator()
        one_element_test = iterator_test.get_next()

        init_op = tf.global_variables_initializer()
        log_writer = tf.summary.FileWriter(logdir)

        # summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
        summaries = set()
        for key in endpoints:
            summaries.add(tf.summary.histogram("block/" + key, endpoints[key]))
        for variable in slim.get_model_variables():
            summaries.add(tf.summary.histogram(variable.op.name, variable))
        for loss in tf.get_collection(tf.GraphKeys.LOSSES):
            summaries.add(tf.summary.scalar(loss.op.name, loss))
        # for loss in tf.get_collection('EXTRA_LOSSES'):
        # summaries.add(tf.summary.scalar(loss.op.name, loss))
        # summaries.add(tf.summary.scalar("accuracy", accuracy))
        accuracy_train_summary_op = tf.summary.scalar("accuracy_train",
                                                      accuracy)
        accuracy_test_summary_op = tf.summary.scalar("accuracy_test", accuracy)
        summaries.add(tf.summary.image("image", images_3d, 4))
        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        step = 0
        with tf.Session() as sess:
            log_writer.add_graph(sess.graph)
            sess.run(init_op)
            try:
                while True:
                    images_, labels_ = sess.run(one_element)
                    sess.run(train_op,
                             feed_dict={
                                 images: images_,
                                 labels: labels_
                             })
                    if step % 10 == 0:
                        summary_, accuracy_train_summary = sess.run(
                            [summary_op, accuracy_train_summary_op],
                            feed_dict={
                                images: images_,
                                labels: labels_
                            })
                        images_, labels_ = sess.run(one_element_test)
                        accuracy_test_summary = sess.run(
                            accuracy_test_summary_op,
                            feed_dict={
                                images: images_,
                                labels: labels_
                            })
                        log_writer.add_summary(summary_, step)
                        log_writer.add_summary(accuracy_train_summary, step)
                        log_writer.add_summary(accuracy_test_summary, step)
                    step += 1
            except tf.errors.OutOfRangeError:
                print("Finished")
        log_writer.close()
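A hedged example of invoking this sweep for a single hyperparameter combination (the values are illustrative):

mnist(learning_rate=1e-3, initializer_mode=0, num_conv_layers=2, num_fc_layers=2)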
Example #33
# data_dir
data_dir = "/tmp/mnist_convnet_model_data" + str(ddl.rank())

input_shape = ()
if K.image_data_format() == 'channels_first':
    input_shape = (1, img_rows, img_cols)
else:
    input_shape = (img_rows, img_cols, 1)

# the data, split between train and test sets
(train_set, num_of_train_imgs) = dataset.train(data_dir, input_shape)
train_set = train_set.shard(ddl.size(), ddl.rank())
train_set = train_set.cache().shuffle(
    buffer_size=1000).batch(batch_size).repeat()

(eval_set, num_of_test_imgs) = dataset.test(data_dir, input_shape)
eval_full = eval_set
eval_set = eval_set.shard(ddl.size(), ddl.rank())
eval_set = eval_set.batch(batch_size).repeat()

num_of_all_test_imgs = num_of_test_imgs
num_of_train_imgs /= ddl.size()
num_of_test_imgs /= ddl.size()

model = Sequential()
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
Example #34
def predict_input_fn(params):
    batch_size = params["batch_size"]
    data_dir = params["data_dir"]
    # Take out top 10 samples from test data to make the predictions.
    ds = dataset.test(data_dir).take(10).batch(batch_size)
    return ds
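A hedged sketch of consuming those ten predictions; it assumes the estimator was built with params carrying batch_size and data_dir so the input_fn receives them:

for pred in estimator.predict(input_fn=predict_input_fn):
    print(pred)  # one prediction dict per sample; stops after the 10 taken above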
Example #35
 def eval_input_fn():
     return dataset.test(flags_obj.data_dir).batch(
         flags_obj.batch_size).make_one_shot_iterator().get_next()
Example #36
'''Data set of car conditions and whether a buyer would buy it 
from http://archive.ics.uci.edu/ml/datasets/Car+Evaluation
'''
import dataset
   
car = dataset.fromFile('car')
r = car.tree()
    
if __name__ == '__main__':
    r.prune(.1)
    
    print(r)
    res, pre, rec, har = r.stats()
    print("\nresults: " + str(res))
    print("precision: " + str(pre))
    print("recall: " + str(rec))
    print("harmonic mean " + str(har))
    
    print("\nTESTING:\n")
    r, stats = dataset.test(car, 50, .1)
    
    print(r)
    res, pre, rec, har = stats
    print("\nresults: " + str(res))
    print("precision: " + str(pre))
    print("recall: " + str(rec))
    print("harmonic mean " + str(har))