Example #1
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=1024,
                                 num_readers=1):
    """Creates the section of the graph which reads the evaluation data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
    logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
    with tf.name_scope("eval_input"):
        files = gfile.Glob(data_pattern)
        if not files:
            raise IOError("Unable to find the evaluation files.")
        logging.info("number of evaluation files: " + str(len(files)))
        filename_queue = tf.train.string_input_producer(files,
                                                        shuffle=False,
                                                        num_epochs=1)
        eval_data = [
            reader.prepare_reader(filename_queue) for _ in range(num_readers)
        ]
        return tf.train.batch_join(eval_data,
                                   batch_size=batch_size,
                                   capacity=3 * batch_size,
                                   allow_smaller_final_batch=True,
                                   enqueue_many=True)
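A minimal consumption sketch for the tensors returned above, assuming TF 1.x queue runners and a YT8M-style reader object (the readers module and the data path below are placeholders, not part of the example):

import tensorflow as tf
import readers  # YT8M starter code's reader module; assumed to be importable

reader = readers.YT8MAggregatedFeatureReader()  # placeholder reader
eval_tensors = get_input_evaluation_tensors(reader,
                                            "/path/to/eval/*.tfrecord",
                                            batch_size=256)
with tf.Session() as sess:
    # string_input_producer(num_epochs=1) creates a local variable.
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            batch = sess.run(list(eval_tensors))  # batched tensors from the reader
    except tf.errors.OutOfRangeError:
        pass
    coord.request_stop()
    coord.join(threads)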
Example #2
def get_features_and_labels(feature_names, input_tfrecord_data_path, num_classes):
    """
    Utility function to get the features and labels from the multiclass
    samples' tfrecords

    :param feature_names:
    :param input_tfrecord_data_path:
    :param num_classes:
    :return:
    """
    list_of_feature_names = [
        feature_names.strip() for feature_names in feature_names.split(',')]
    # now read the input tfrecord files from the given path
    files = gfile.Glob(input_tfrecord_data_path)
    if not files:
        raise IOError("Unable to find training files. tfrecord_data_path='" +
                      input_tfrecord_data_path + "'.")
    logging.info("Number of training files: %s.", str(len(files)))
    filename_queue = tf.train.string_input_producer(files, num_epochs=1, shuffle=False)
    reader = tf.TFRecordReader()
    filename, serialized_example = reader.read(filename_queue)
    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={"video_id": tf.FixedLenFeature(
            [], tf.string),
            "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name: tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in list_of_feature_names
        })
    context_video_id = contexts["video_id"]
    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (num_classes,), 1,
                           validate_indices=False),
        tf.int32))
    return context_video_id, features, labels
Example #3
def read(output_filename):
    files = gfile.Glob(output_filename)
    filename_queue = tf.train.string_input_producer(files,
                                                    shuffle=False,
                                                    num_epochs=1)

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    keys_to_features = {
        'video_id': tf.FixedLenFeature((), tf.string, default_value=''),
        'labels': tf.VarLenFeature(tf.int64),
        'feas': tf.FixedLenFeature((), tf.string, default_value=''),
    }
    contexts = tf.parse_single_example(serialized_example, keys_to_features)
    feas = contexts["feas"]
    feas = tf.reshape(tf.decode_raw(feas, tf.float32), [-1])

    dense_labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (10, ),
                           1,
                           validate_indices=False), tf.bool))

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # feas_val, labels, video_id = sess.run([feas, contexts["labels"], contexts["video_id"]])
        feas_val, labels, video_id = sess.run(
            [feas, dense_labels, contexts["video_id"]])
        print(feas_val.shape)
        print(np.reshape(feas_val, [256, 256]))
        print(labels)
        print(video_id)
        # print(sess.run(labels))
        coord.request_stop()
        coord.join(threads)
Example #4
def get_input_data_tensors(reader, data_pattern, batch_size, num_readers=1):
    """Creates the section of the graph which reads the input data.

  Args:
    reader: A class which parses the input data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
    with tf.name_scope("input"):
        files = gfile.Glob(data_pattern)
        if not files:
            raise IOError("Unable to find input files. data_pattern='" +
                          data_pattern + "'")
        logging.info("number of input files: " + str(len(files)))
        print(files)
        filename_queue = tf.train.string_input_producer(files,
                                                        num_epochs=1,
                                                        shuffle=False)
        examples_and_labels = [
            reader.prepare_reader(filename_queue) for _ in range(num_readers)
        ]

        imageInput, seq_len, target = (tf.train.batch_join(
            examples_and_labels,
            batch_size=batch_size,
            allow_smaller_final_batch=True,
            enqueue_many=True))
        return imageInput, seq_len, target
Example #5
File: inputs.py Project: jpgard/ffn
def create_filename_queue(coordinates_file_pattern, shuffle=True):
    """Creates a queue for reading coordinates from coordinate file.

  Args:
    coordinates_file_pattern: File pattern for TFRecords of
                              input examples of the form of a glob
                              pattern or path@shards.
    shuffle: Whether to shuffle the coordinate file list. Note that the expanded
             coordinates_file_pattern is not guaranteed to be sorted
             alphabetically.

  Returns:
    Tensorflow queue with coordinate filenames
  """
    m = re.search(r'@(\d{1,})', coordinates_file_pattern)
    if m:
        num_shards = int(m.group(1))
        coord_file_list = [
            re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards),
                   coordinates_file_pattern) for i in range(num_shards)
        ]
    else:
        coord_file_list = gfile.Glob(coordinates_file_pattern)
    return tf.train.string_input_producer(coord_file_list, shuffle=shuffle)
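For reference, a standalone sketch (plain Python, no TensorFlow required) of how the path@shards syntax above expands; the pattern string is purely illustrative:

import re

coordinates_file_pattern = 'coords.tfrecord@4'  # illustrative pattern
m = re.search(r'@(\d{1,})', coordinates_file_pattern)
if m:
    num_shards = int(m.group(1))
    print([re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards),
                  coordinates_file_pattern) for i in range(num_shards)])
    # -> ['coords.tfrecord-00000-of-00004', ..., 'coords.tfrecord-00003-of-00004']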
Example #6
def get_input_train_tensors(reader,
                            data_pattern,
                            batch_size=128,
                            num_epochs=None,
                            num_readers=2):
    """
    Creates the section of the graph which reads the training data .
    :param reader: A class which parses the training data .
    :param data_pattern: A 'glob' style path to the data files .
    :param batch_size: How many examples to process at a time .
    :param num_epochs: How many passes to make over the training data. Set to 'None' to run indefinitely.
    :param num_readers: How many I/O threads to use .
    :return: A tuple containing the features tensor, labels tensor
    """
    logging.info("Using batch size of {} for training . ".format(batch_size))
    with tf.name_scope("train_input"):
        files = gfile.Glob(data_pattern)
        if not files:
            raise IOError(
                "Unable to find training files. data_pattern={} . ".format(
                    data_pattern))
        logging.info("Number of training files: {}. ".format(len(files)))
        filename_queue = tf.train.string_input_producer(files,
                                                        num_epochs=num_epochs,
                                                        shuffle=True)
        train_data = [
            reader.prepare_reader(filename_queue, True)
            for _ in range(num_readers)
        ]

        return tf.train.shuffle_batch_join(train_data,
                                           batch_size=batch_size,
                                           capacity=batch_size * 5,
                                           min_after_dequeue=batch_size,
                                           allow_smaller_final_batch=True,
                                           enqueue_many=True)
Example #7
def run(p, params):
    """Defines Beam preprocessing pipeline.

  Performs the following:
    - Reads text files from pattern.
    - Split text files in train and validation sets.

  Args:
    p: PCollection, initial pipeline.
    params: Object holding a set of parameters as name-value pairs.
  """

    path_pattern = os.path.join(params.input_dir, '*',
                                '*{}'.format(constants.FILE_EXTENSION))
    data = (p
            | 'ListFiles' >> beam.Create(gfile.Glob(path_pattern))
            | 'ReadFiles' >> beam.ParDo(ReadFile())
            | 'SplitData' >> beam.ParDo(
                _SplitData(),
                train_size=params.train_size,
                val_label=_DatasetType.VAL.name).with_outputs(
                    _DatasetType.VAL.name, main=_DatasetType.TRAIN.name))

    schema = dataset_schema.from_feature_spec(
        utils.get_processed_data_schema())
    for dataset in _DatasetType:
        if not dataset.value:
            continue
        _ = (
            data[dataset.name]
            | 'Shuffle{}'.format(dataset.name) >> shuffle()  # pylint: disable=no-value-for-parameter
            |
            'WriteFiles{}'.format(dataset.name) >> tfrecordio.WriteToTFRecord(
                os.path.join(params.output_dir,
                             dataset.name + constants.TFRECORD),
                coder=example_proto_coder.ExampleProtoCoder(schema)))
Example #8
def inputs(batch_size, num_epochs, input_tfrecord):

    if not num_epochs:
        num_epochs = None

    with tf.name_scope('input'):

        files = gfile.Glob(input_tfrecord)
        files = sorted(files)

        print("files num : ", len(files))

        if not files:
            raise IOError("Unable to find training files. data_pattern='" +
                          input_tfrecord + "'.")
        logging.info("Number of training files: %s.", str(len(files)))

        filename_queue = tf.train.string_input_producer(files,
                                                        num_epochs=num_epochs,
                                                        shuffle=False)

        image, label = read_and_decode(filename_queue)

        print("image     :", image.shape)
        print("label      :", label.shape)

        image_batch, label_batch = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=10,
            capacity=10000 + 15 * batch_size,
            min_after_dequeue=10000,
            allow_smaller_final_batch=False  # True --> error ...
        )
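        # With allow_smaller_final_batch=True the static batch dimension of the
        # returned tensors becomes None, which can break downstream code that
        # assumes a fixed batch size; that is a likely cause of the error
        # mentioned in the comment above.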

        return image_batch, label_batch, len(files)
Example #9
def GlobTest():
    file_name = "E:\\mini_project\\python_test\\module_test\\tf_test\\data_space"
    file = gfile.Glob(file_name)
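    # The pattern above contains no wildcard, so Glob will at most return the
    # literal path if it exists; append a wildcard (e.g. "\\*") to list the
    # files inside the directory.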
    print(file)
Example #10
def main(argv):
    del argv

    if not gfile.Exists(FLAGS.save_dir):
        try:
            gfile.MkDir(FLAGS.save_dir)
        except:
            print(('WARNING: error creating save directory, '
                   'directory most likely already created.'))

    save_dir = os.path.join(FLAGS.save_dir,
                            FLAGS.dataset + '_' + FLAGS.sampling_method)

    if FLAGS.do_save == "True":
        if not gfile.Exists(save_dir):
            try:
                gfile.MkDir(save_dir)
            except:
                print(('WARNING: error creating save directory, '
                       'directory most likely already created.'))

        # Set up logging
        filename = os.path.join(
            save_dir,
            "log-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt")
        sys.stdout = utils.Logger(filename)

    X, y = utils.get_mldata(FLAGS.data_dir, FLAGS.dataset)  #load dataset!
    starting_seed = FLAGS.seed

    all_results = {}

    for seed in range(starting_seed, starting_seed + FLAGS.trials):
        sampler = get_AL_sampler(FLAGS.sampling_method)  #load sampler!
        score_model = utils.get_model(FLAGS.score_method,
                                      seed)  #load score model!
        if (FLAGS.select_method == "None" or  #load select model!
                FLAGS.select_method == FLAGS.score_method):
            select_model = None
        else:
            select_model = utils.get_model(FLAGS.select_method, seed)

        results, sampler_state = \
        generate_one_curve(X=X,
                           y=y,
                           sampler=sampler,
                           score_model=score_model,
                           seed=seed,
                           warmstart_size=FLAGS.warmstart_size,
                           batch_size=FLAGS.batch_size,
                           select_model=select_model,
                           max_points=FLAGS.max_dataset_size)

        key = (FLAGS.dataset, FLAGS.sampling_method, FLAGS.score_method,
               FLAGS.select_method, FLAGS.warmstart_size, FLAGS.batch_size,
               seed)

        #sampler_output = sampler_state.to_dict()
        #results['sampler_output'] = sampler_output
        results['sampler_output'] = None
        all_results[key] = results

    fields = [
        'dataset', 'sampling_methods', 'score_method', 'select_method',
        'warmstart size', 'batch size', 'seed'
    ]
    all_results['tuple_keys'] = fields

    if FLAGS.do_save == "True":
        filename = ("results_score_" + FLAGS.score_method + "_select_" +
                    FLAGS.select_method)
        existing_files = gfile.Glob(os.path.join(save_dir, filename + "*.pkl"))
        filename = os.path.join(
            save_dir,
            filename + "_" + str(1000 + len(existing_files))[1:] + ".pkl")
        pickle.dump(all_results, gfile.GFile(filename, "w"))
        sys.stdout.flush_file()
Example #11
  def upload_variants(self,
                      dataset,
                      variantset,
                      source_vcfs,
                      destination_table,
                      expand_wildcards=False,
                      new_dataset=False,
                      new_variantset=False,
                      description=None):
    """Imports variants stored in a VCF in Cloud Storage to BigQuery.

    Handle all intermediate steps, including finding dataset and variant sets.

    Args:
      dataset: Name or id of existing dataset, or name for a new dataset.
      variantset: Name or id of existing variant set, or name for a new one.
      source_vcfs: List of VCF file[s] in Cloud Storage, wildcards accepted
          (*, not **).
      destination_table: BigQuery output, as PROJECT_ID.DATASET_NAME.TABLE_NAME.
      expand_wildcards: Expand wildcards in VCF paths and use parallel imports.
      new_dataset: Always create a new dataset with the requested name.
      new_variantset: Always create a new variant set with the requested name.
      description: Optional description for the BigQuery table.

    Raises:
      RuntimeError: If an upload or export request does not succeed.
    """

    dataset_id = self.find_or_create_dataset(dataset,
                                             always_create=new_dataset)

    variantset_id = self.find_or_create_variantset(
        variantset,
        dataset_id,
        description="\t".join(source_vcfs),
        always_create=new_variantset)

    # Spawn off parallel imports for each VCF.
    if expand_wildcards and gfile is not None:
      # Expand any wildcarded paths and concatenate all files together.
      source_vcfs = sum([gfile.Glob(source_vcf) for source_vcf in source_vcfs],
                        [])

    operation_ids = []
    for source_vcf in source_vcfs:
      operation_ids.append(self.import_variants(source_vcf, variantset_id))
      logging.info("Importing %s (%s)", source_vcf, operation_ids[-1])

    # Wait for all imports to complete successfully before exporting variantset.
    for operation_id in operation_ids:
      if not self.wait_for_operation(operation_id):
        raise RuntimeError("Failed to import variants to Genomics (%s)"
                           % operation_id)

    operation_id = self.export_variants(variantset_id, destination_table)
    logging.info("Exporting %s (%s)", variantset, operation_id)

    if not self.wait_for_operation(operation_id):
      raise RuntimeError("Failed to export variants to BigQuery (%s)"
                         % operation_id)

    # Assume the VCF header is the same for all files and so just use the first.
    logging.info("Updating schema for %s", variantset)
    schema_update_utils.update_table_schema(destination_table,
                                            source_vcfs[0],
                                            description=description)
Example #12
def main(argv):
  del argv

  if not gfile.Exists(FLAGS.save_dir):
    try:
      gfile.MkDir(FLAGS.save_dir)
    except:
      print(('WARNING: error creating save directory, '
             'directory most likely already created.'))

  save_dir = os.path.join(
      FLAGS.save_dir,
      FLAGS.dataset + "_" + FLAGS.sampling_method)
  do_save = FLAGS.do_save == "True"

  if do_save:
    if not gfile.Exists(save_dir):
      try:
        gfile.MkDir(save_dir)
      except:
        print(('WARNING: error creating save directory, '
               'directory most likely already created.'))
    # Set up logging
    filename = os.path.join(
        save_dir, "log-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt")
    sys.stdout = utils.Logger(filename)

  confusions = [float(t) for t in FLAGS.confusions.split(" ")]
  mixtures = [float(t) for t in FLAGS.active_sampling_percentage.split(" ")]
  all_results = {}
  max_dataset_size = None if FLAGS.max_dataset_size == "0" else int(
      FLAGS.max_dataset_size)
  normalize_data = FLAGS.normalize_data == "True"
  standardize_data = FLAGS.standardize_data == "True"
  X, y = utils.get_mldata(FLAGS.data_dir, FLAGS.dataset)
  starting_seed = FLAGS.seed

  for c in confusions:
    for m in mixtures:
      for seed in range(starting_seed, starting_seed + FLAGS.trials):
        sampler = get_AL_sampler(FLAGS.sampling_method)
        score_model = utils.get_model(FLAGS.score_method, seed)
        if (FLAGS.select_method == "None" or
            FLAGS.select_method == FLAGS.score_method):
          select_model = None
        else:
          select_model = utils.get_model(FLAGS.select_method, seed)
        results, sampler_state = generate_one_curve(
            X, y, sampler, score_model, seed, FLAGS.warmstart_size,
            FLAGS.batch_size, select_model, c, m, max_dataset_size,
            standardize_data, normalize_data, FLAGS.train_horizon)
        key = (FLAGS.dataset, FLAGS.sampling_method, FLAGS.score_method,
               FLAGS.select_method, m, FLAGS.warmstart_size, FLAGS.batch_size,
               c, standardize_data, normalize_data, seed)
        sampler_output = sampler_state.to_dict()
        results["sampler_output"] = sampler_output
        all_results[key] = results
  fields = [
      "dataset", "sampler", "score_method", "select_method",
      "active percentage", "warmstart size", "batch size", "confusion",
      "standardize", "normalize", "seed"
  ]
  all_results["tuple_keys"] = fields

  if do_save:
    filename = ("results_score_" + FLAGS.score_method +
                "_select_" + FLAGS.select_method +
                "_norm_" + str(normalize_data) +
                "_stand_" + str(standardize_data))
    existing_files = gfile.Glob(os.path.join(save_dir, filename + "*.pkl"))
    filename = os.path.join(save_dir,
                            filename + "_" + str(1000+len(existing_files))[1:] + ".pkl")
    pickle.dump(all_results, gfile.GFile(filename, "w"))
    sys.stdout.flush_file()
Example #13
# import tensorflow.contrib.slim as slim
import tensorflow as tf
from tensorflow import gfile
from tensorflow import logging
from yt8m_util import cross_entropy_loss, logistic_regress_model
from yt8m_reader import YT8MVideoLevelFeatureReader
import time

batch_size = 1024
# data_dir = '/tmp/audioset_v1_embedings/unbal_train'
data_dir = '/home/showlove/cc/youtube-8m/tmp/audioset_v1_embeddings/unbal_train'
train_dir = '/tmp/yt8m_model'
class_size = 4716
max_epoch = 100
max_step = 10000

files = gfile.Glob(data_dir)
filename_queue = tf.train.string_input_producer(files,
                                                num_epochs=None,
                                                shuffle=True)
reader = YT8MVideoLevelFeatureReader(num_classes=class_size)
unused_id, input_feature, lables, num_frames = reader.get_batch_data(
    filename_queue)
unused_id, input_feature, lables, num_frames = tf.train.shuffle_batch(
    [unused_id, input_feature, lables, num_frames],
    batch_size=batch_size,
    capacity=5 * batch_size,
    min_after_dequeue=batch_size,
    allow_smaller_final_batch=True,
    enqueue_many=True)
y_out = logistic_regress_model(input_feature, class_size)
loss = cross_entropy_loss(y_out, lables)
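The snippet stops after defining the loss; a hedged sketch of how the remaining training loop might look (the optimizer choice and learning rate are assumptions, as is the availability of the custom yt8m_util / yt8m_reader modules imported above):

train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in range(max_step):
            _, loss_val = sess.run([train_op, loss])
            if step % 100 == 0:
                logging.info("step %d, loss %.4f", step, loss_val)
    except tf.errors.OutOfRangeError:
        pass
    finally:
        coord.request_stop()
        coord.join(threads)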
Example #14
 def count_games():
     # returns number of games in the selfplay directory
     if not os.path.exists(os.path.join(SELFPLAY_DIR, model_name)):
         # directory not existing implies no games have been played yet
         return 0
     return len(gfile.Glob(os.path.join(SELFPLAY_DIR, model_name, '*.zz')))
Example #15
    def train(self):
        logger.log(logger._snapshot_dir)
        basedir = logger._snapshot_dir
        if basedir is None:
            basedir = 'model/'
        else:
            basedir += "/"
        nlen = self.nlen
        videos = gfile.Glob("model/videos/*.mp4")
        # videos = pickle.load(open('videolist.pkl', 'rb'))
        idata = [[] for _ in range(nlen)]
        nfail = 0
        itr = 0
        np.random.shuffle(videos)
        for name in videos:
            try:
                vid = imageio.get_reader(name, 'ffmpeg')
                if itr % 100 == 0:
                    logger.log("%s %s" % (itr, len(idata[0])))
                if len(vid) == 51:
                    frames = []
                    for j in range(1, 51, self.nskip):
                        frame = transform(vid.get_data(j), self.idims[0],
                                          self.idims[1], self.rescale)
                        if not self.inception and np.max(frame) == -1:
                            logger.log("rip %s %s" % (itr, name))
                            frames = []
                            break
                        frames.append(frame)
                    if len(frames) != nlen:
                        continue
                    for j, f in enumerate(frames):
                        idata[j].append(f)
                else:
                    logger.log("%s %s" % (name, len(vid)))
                itr += 1
            except:
                nfail += 1
                logger.log("Unexpected error:" + str(sys.exc_info()))
                logger.log(name)
                if nfail > 10:
                    break
            if itr >= self.nvideos:
                break
        vdata = np.array(idata)
        np.save(basedir + 'vdata_strike' + str(itr), vdata)
        logger.log(str(vdata.shape))

        # tf.reset_default_graph()
        batch_size = self.batch_size
        if self.inception:
            tfinput = tf.placeholder(tf.uint8, (
                3,
                batch_size,
            ) + self.idims + (3, ),
                                     name='image')
            image_trans = tf.image.convert_image_dtype(tf.reshape(
                tensor=tfinput, shape=(3 * batch_size, ) + self.idims + (3, )),
                                                       dtype=tf.float32)
            image_trans = tf.subtract(image_trans, 0.5)
            image_trans = tf.multiply(image_trans, 2.0)
            with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                model = inception_v3.inception_v3(image_trans,
                                                  num_classes=1001,
                                                  is_training=False,
                                                  dropout_keep_prob=1.0)
            variables_to_restore = slim.get_variables_to_restore()
            restorer = tf.train.Saver(variables_to_restore)
            bird = scipy.misc.imread('model/bird.jpg')
            bird = scipy.misc.imresize(bird, self.idims)
            test = self.model(strides=self.strides,
                              kernels=self.kernels,
                              filters=self.filters)
            featlayer = model[1]['Mixed_7c']
            featshape = featlayer.get_shape().as_list()
            featreshape = tf.reshape(
                featlayer,
                (3, batch_size, featshape[1], featshape[2], featshape[3]))
            with tf.variable_scope("contextmodel") as scope:
                test.build(featreshape)
        else:
            tfinput = tf.placeholder(tf.float32,
                                     (3, batch_size) + self.idims + (3, ),
                                     name='x')
            test = self.model()
            with tf.variable_scope("contextmodel") as scope:
                test.build(tfinput)

        sess = tf.Session()
        learning_rate = tf.placeholder(tf.float32, shape=[])

        train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       "contextmodel")
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
            test.loss, var_list=train_vars)
        sess.run(tf.global_variables_initializer())
        allloss = []
        validloss = []
        itr = 0
        saver = tf.train.Saver()

        if self.inception:
            restorer.restore(sess, "model/inception_v3.ckpt")
            logits = sess.run(model[0], {tfinput: [[bird] * batch_size] * 3})
            print(np.argsort(logits[0])[-20:])
            # vid = imageio.get_reader('/home/andrewliu/research/viewpoint/train/strikebig/videos/openaigym.video.1456.26585.video000000.mp4',  'ffmpeg')
            # frame = scipy.misc.imresize(vid.get_data(0), self.idims)
            # layersout = sess.run(model[1]['Mixed_5d'], {tfinput:[[frame]*batch_size]*3})
            # print(np.max(layersout))

        n = vdata.shape[1]
        ntrain = self.ntrain
        nvalid = n - ntrain
        logger.log("%s %s" % (ntrain, nvalid))
        nn_err = tf.reduce_sum(
            tf.abs(
                tf.argmin(tf.reduce_mean(
                    (featreshape[2][:, None] - test.out)**2, axis=(2, 3, 4)),
                          axis=0) - np.arange(0, batch_size) % nlen))
        validdata = vdata[:, ntrain:]
        traindata = vdata[:, :ntrain]
        logger.log(str(validdata.shape) + str(traindata.shape))
        np.save(basedir + 'vdata_train', traindata[:, :200])
        for itr in range(1, self.nitr):
            choicesrc = np.random.choice(ntrain, batch_size)
            choicetgt = np.random.choice(ntrain, batch_size)
            srcdata = traindata[np.arange(0, batch_size) % nlen, choicesrc]
            tgtdata = traindata[np.arange(0, batch_size) % nlen, choicetgt]
            tgtctx = traindata[0, choicetgt]
            batch = [srcdata, tgtctx, tgtdata]

            #     logger.log(sess.run( [test.recon1, test.recon2, test.loss, test.simloss], {tfinput: batch, learning_rate:1e-4, tftrain:False}))
            if itr % 4 == 0:
                _, loss, sim, r1, r2, err = sess.run([
                    optimizer, test.loss, test.simloss, test.recon1,
                    test.recon2, nn_err
                ], {
                    tfinput: batch,
                    learning_rate: 1e-4
                })
                logger.log("%s %s %s %s %s %s" % (itr, loss, sim, r1, r2, err))
                allloss.append(loss)
            else:
                sess.run(optimizer, {tfinput: batch, learning_rate: 1e-4})

            if itr % 40 == 0 or itr % self.save_every == 0:
                choicesrc = np.random.choice(nvalid, batch_size)
                choicetgt = np.random.choice(nvalid, batch_size)
                srcdata = validdata[np.arange(0, batch_size) % nlen, choicesrc]
                tgtdata = validdata[np.arange(0, batch_size) % nlen, choicetgt]
                tgtctx = validdata[0, choicetgt]
                batch = [srcdata, tgtctx, tgtdata]
                loss, sim, r1, r2, err = sess.run([
                    test.loss, test.simloss, test.recon1, test.recon2, nn_err
                ], {tfinput: batch})
                logger.log("%s %s %s %s %s %s E" %
                           (itr, loss, sim, r1, r2, err))
                validloss.append(loss)
                if itr % self.save_every == 0:
                    os.mkdir(basedir + str(itr))
                    saver.save(
                        sess, '%s%d/model_%d_%.2f_%.2f_%.2f_%d' %
                        (basedir, itr, itr, loss, r1, r2, err))
                    np.save('%s%d/validloss' % (basedir, itr), validloss)
                    if not self.inception:
                        for kk in range(10):
                            choicesrc = [np.random.randint(nvalid)
                                         ] * batch_size
                            choicetgt = [np.random.randint(nvalid)
                                         ] * batch_size
                            srcdata = validdata[np.arange(0, batch_size) %
                                                nlen, choicesrc]
                            tgtdata = validdata[np.arange(0, batch_size) %
                                                nlen, choicetgt]
                            tgtctx = validdata[0, choicetgt]
                            batch = [srcdata, tgtctx, tgtdata]
                            L, r1, r2, testouts = sess.run([
                                test.loss, test.recon1, test.recon2, test.out
                            ], {tfinput: batch})
                            L, r1, r2, testouts2 = sess.run([
                                test.loss, test.recon1, test.recon2, test.out2
                            ], {tfinput: batch})
                            savegif("%s%d/__%dtrans.gif" % (basedir, itr, kk),
                                    testouts[:nlen])
                            savegif("%s%d/__%drecon.gif" % (basedir, itr, kk),
                                    testouts2[:nlen])
                if itr >= self.save_every:
                    logger.record_tabular('Iteration', itr)
                    logger.record_tabular('Loss', loss)
                    logger.record_tabular('Sim', sim)
                    logger.record_tabular('R1', r1)
                    logger.record_tabular('R2', r2)
                    logger.record_tabular('NNErr', err)
                    logger.dump_tabular(with_prefix=False)
Example #16
def main(args):

    # make the export folder structure
    # this is made here because the Logger uses the filename
    if args.do_save:
        # make a base save directory
        utils.make_dir(args.save_dir)

        # make a directory in the base save directory with for the specific
        # method.
        save_subdir = os.path.join(args.save_dir,
                                   args.dataset + "_" + args.sampling_method)
        utils.make_dir(save_subdir)

        filename = os.path.join(
            save_subdir,
            "log-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + ".txt")
        sys.stdout = utils.Logger(filename)

    # confusion argument can have multiple values
    confusions = [float(t) for t in args.confusions.split(" ")]
    mixtures = [float(t) for t in args.active_sampling_percentage.split(" ")]
    max_dataset_size = None if args.max_dataset_size == 0 else args.max_dataset_size
    starting_seed = args.seed

    # get the dataset from file based on the data directory and dataset name
    X, y = utils.get_mldata(args.data_dir, args.dataset)

    # object to store the results in
    all_results = {}

    # percentage of labels to randomize
    for c in confusions:

        # Mixture weights on active sampling."
        for m in mixtures:

            # the number of curves created during multiple trials
            for seed in range(starting_seed, starting_seed + args.trials):

                # get the sampler based on the name
                # returns a python object
                # also named: query strategy
                sampler = get_AL_sampler(args.sampling_method)

                # get the model
                score_model = utils.get_model(args.score_method, seed)

                #
                if (args.select_method == "None"
                        or args.select_method == args.score_method):
                    select_model = None
                else:
                    select_model = utils.get_model(args.select_method, seed)

                # create the learning curve
                results, sampler_state = generate_one_curve(
                    X,
                    y,
                    sampler,
                    score_model,
                    seed,
                    args.warmstart_size,
                    args.batch_size,
                    select_model,
                    confusion=c,
                    active_p=m,
                    max_points=max_dataset_size,
                    standardize_data=args.standardize_data,
                    norm_data=args.normalize_data,
                    train_horizon=args.train_horizon)
                key = (args.dataset, args.sampling_method, args.score_method,
                       args.select_method, m, args.warmstart_size,
                       args.batch_size, c, args.standardize_data,
                       args.normalize_data, seed)
                sampler_output = sampler_state.to_dict()
                results["sampler_output"] = sampler_output
                all_results[key] = results

    # Not sure why this is done this way.
    fields = [
        "dataset", "sampler", "score_method", "select_method",
        "active percentage", "warmstart size", "batch size", "confusion",
        "standardize", "normalize", "seed"
    ]
    all_results["tuple_keys"] = fields

    # write the results to a file
    if args.do_save:

        # format the filename
        filename = "results_score_{}_select_{}_norm_{}_stand_{}".format(
            args.score_method, args.select_method, args.normalize_data,
            args.standardize_data)

        existing_files = gfile.Glob(
            os.path.join(save_subdir, "{}*.pkl".format(filename)))
        filepath = os.path.join(
            save_subdir, "{}_{}.pkl".format(
                filename, str(1000 + len(existing_files))[1:]))

        # dump the dict to a pickle file
        pickle.dump(all_results, gfile.GFile(filepath, "w"))

        # flush stdout
        sys.stdout.flush_file()
Example #17
def main(stage, split_id=""):
    feature_names = "rgb"
    feature_sizes = "1024"

    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        feature_names, feature_sizes)

    reader = readers.YT8MFrameFeatureReader(
        feature_names=feature_names,
        feature_sizes=feature_sizes,
    )

    # data_pattern = "/data/uts700/linchao/yt8m/data/{0}/{0}*.tfrecord".format(stage)
    data_pattern = "/data/uts700/linchao/yt8m/data/splits/{0}/{1}/{0}*.tfrecord".format(
        stage, split_id)
    # data_pattern = "/data/uts700/linchao/yt8m/data/splits/train/5/traincc.tfrecord"#.format(stage, split_id)
    num_readers = 3
    batch_size = 128
    input_shuffle = False

    files = gfile.Glob(data_pattern)
    if not files:
        raise IOError("Unable to find the evaluation files.")
    filename_queue = tf.train.string_input_producer(files,
                                                    shuffle=input_shuffle,
                                                    num_epochs=1)
    eval_data = [
        reader.prepare_reader(filename_queue) for _ in xrange(num_readers)
    ]

    # eval_data = reader.prepare_reader(filename_queue)
    if input_shuffle:
        eval_data = tf.train.shuffle_batch_join(eval_data,
                                                batch_size=batch_size,
                                                capacity=5 * batch_size,
                                                min_after_dequeue=batch_size,
                                                allow_smaller_final_batch=True)
    else:
        eval_data = tf.train.batch_join(eval_data,
                                        batch_size=batch_size,
                                        capacity=5 * batch_size,
                                        allow_smaller_final_batch=True)
    video_id_batch, model_input_raw, labels_batch, num_frames = eval_data
    inputs = {
        'video_id': video_id_batch,
        'input_raw': model_input_raw,
        'labels': labels_batch,
        'num_frames': num_frames
    }

    task = Stats()
    task = HDFS(stage, split_id)

    with tf.Session() as sess:
        sess.run([tf.local_variables_initializer()])
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))

            examples_processed = 0
            cnt = 0
            while not coord.should_stop():
                if not task.run(sess, inputs):
                    break
                examples_processed += batch_size

                cnt += 1
                if cnt % 5 == 0:
                    print("examples processed: {}".format(examples_processed))

        except tf.errors.OutOfRangeError as e:
            logging.info(
                "Done with batched inference. Now calculating global performance "
                "metrics.")

    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
    task.done()
Example #18
def _get(pattern):
    files = gfile.Glob(pattern)
    pool = multiprocessing.Pool()
    all_results = pool.map(_load, files)
    return pd.DataFrame(all_results)
Example #19
def get_existing_corners(segmentation_dir):
    corners = []
    for path in gfile.Glob(os.path.join(segmentation_dir, 'seg-*_*_*.npz')):
        corners.append(get_corner_from_path(path))

    return corners
Example #20
def main(unused_argv):

    for i in range(0, NUM_LOOP):
        if i == 0:
            src_model_name = shipname.generate(0)
            fsdb.switch_base(os.path.join(base_dir, src_model_name))
            src_model_path = os.path.join(fsdb.models_dir(), src_model_name)
            bootstrap_model_path = os.path.join(fsdb.models_dir(),
                                                src_model_name)
            mask_flags.checked_run([
                'python3', 'bootstrap.py',
                '--export_path={}'.format(bootstrap_model_path),
                '--work_dir={}'.format(fsdb.working_dir()),
                '--flagfile=rl_loop/local_flags'
            ])
            dst_model_name = shipname.generate(1)
            fsdb.switch_base(os.path.join(base_dir, dst_model_name))
        else:
            src_model_name = dst_model_name
            src_model_path = os.path.join(fsdb.models_dir(), src_model_name)
            dst_model_name = shipname.generate(i + 1)
            fsdb.switch_base(os.path.join(base_dir, dst_model_name))

        utils.ensure_dir_exists(fsdb.models_dir())
        utils.ensure_dir_exists(fsdb.selfplay_dir())
        utils.ensure_dir_exists(fsdb.holdout_dir())
        utils.ensure_dir_exists(fsdb.sgf_dir())
        utils.ensure_dir_exists(fsdb.eval_dir())
        utils.ensure_dir_exists(fsdb.golden_chunk_dir())
        utils.ensure_dir_exists(fsdb.working_dir())

        #bootstrap_name = shipname.generate(0)
        #bootstrap_model_path = os.path.join(fsdb.models_dir(), bootstrap_name)

        print(src_model_name)
        print(src_model_path)
        selfplay_cmd = [
            'python3', 'selfplay.py',
            '--load_file={}'.format(src_model_path),
            '--selfplay_dir={}'.format(
                os.path.join(fsdb.selfplay_dir(), dst_model_name)),
            '--holdout_dir={}'.format(
                os.path.join(fsdb.holdout_dir(), dst_model_name)),
            '--sgf_dir={}'.format(fsdb.sgf_dir()),
            '--holdout_pct=0',
            '--flagfile=rl_loop/local_flags'
        ]

        # Selfplay twice
        mask_flags.checked_run(selfplay_cmd)
        mask_flags.checked_run(selfplay_cmd)

        # and once more to generate a held out game for validation
        # exploits flags behavior where if you pass flag twice, second one wins.
        mask_flags.checked_run(selfplay_cmd + ['--holdout_pct=100'])

        # Double check that at least one sgf has been generated.
        assert os.listdir(os.path.join(fsdb.sgf_dir(), 'full'))

        print("Making shuffled golden chunk from selfplay data...")
        # TODO(amj): refactor example_buffer so it can be called the same way
        # as everything else.
        eb.make_chunk_for(output_dir=fsdb.golden_chunk_dir(),
                          local_dir=fsdb.working_dir(),
                          game_dir=fsdb.selfplay_dir(),
                          model_num=1,
                          positions=64,
                          threads=8,
                          sampling_frac=1)

        tf_records = sorted(
            gfile.Glob(os.path.join(fsdb.golden_chunk_dir(), '*.tfrecord.zz')))

        #trained_model_name = shipname.generate(1)
        trained_model_name = dst_model_name
        trained_model_path = os.path.join(fsdb.models_dir(),
                                          trained_model_name)

        # Train on shuffled game data
        mask_flags.checked_run([
            'python3', 'train.py', *tf_records,
            '--work_dir={}'.format(fsdb.working_dir()),
            '--export_path={}'.format(trained_model_path),
            '--flagfile=rl_loop/local_flags'
        ])

    print("Finished!")
Example #21
 def count_games():
     # returns number of games in the selfplay directory
     if not os.path.exists(selfplay_dir):
         # directory not existing implies no games have been played yet
         return 0
     return len(gfile.Glob(os.path.join(selfplay_dir, '*.zz')))
Example #22
File: test.py Project: bullud/testTFRecord
import tensorflow as tf
from tensorflow import gfile
from tensorflow import logging

num_epochs = 1

batch_size = 1

num_classes = 4716

data_pattern = '/Users/super/yt8m/train*.tfrecord'

#files = [gfile.Glob(data_pattern[i]) for i in range(len(data_pattern))]

files = gfile.Glob(data_pattern)
if not files:
    raise IOError("Unable to find training files. data_pattern='" +
                  str(data_pattern) + "'.")

logging.info("Number of training files: %s.", str(len(files)))

filename_queue = tf.train.string_input_producer(files,
                                                num_epochs=num_epochs,
                                                shuffle=False)

reader = tf.TFRecordReader()

_, serialized_examples = reader.read_up_to(filename_queue, batch_size)

feature_map = {
    "video_id": tf.FixedLenFeature([], tf.string),
Example #23
def read_and_return_stats(tfrecord_data_path,
                          feature_names,
                          feature_sizes,
                          num_classes,
                          max_frames=300,
                          max_quantized_value=2,
                          min_quantized_value=-2):
    """
    This reads all the tfrecord files in a given directory and provides stats as to how many number of
    total tfrecords, in that how many are water samples and non-water samples.

    :param tfrecord_data_path:
    :param feature_names:
    :param feature_sizes:
    :param num_classes:
    :param max_frames:
    :param max_quantized_value:
    :param min_quantized_value:
    :return:
    """
    # grab the tensorflow session
    with tf.Session() as sess:
        list_of_feature_names = [
            feature_names.strip() for feature_names in feature_names.split(',')
        ]
        list_of_feature_sizes = [
            int(feature_sizes) for feature_sizes in feature_sizes.split(',')
        ]

        files = gfile.Glob(tfrecord_data_path)
        if not files:
            raise IOError(
                "Unable to find training files. tfrecord_data_path='" +
                tfrecord_data_path + "'.")
        logging.info("Number of training files: %s.", str(len(files)))
        filename_queue = tf.train.string_input_producer(files,
                                                        num_epochs=1,
                                                        shuffle=False)

        # training_data = [
        #     reader.prepare_reader(filename_queue) for _ in range(num_readers)
        # ]

        reader = tf.TFRecordReader()
        filename, serialized_example = reader.read(filename_queue)

        contexts, features = tf.parse_single_sequence_example(
            serialized_example,
            context_features={
                "video_id": tf.FixedLenFeature([], tf.string),
                "labels": tf.VarLenFeature(tf.int64)
            },
            sequence_features={
                feature_name: tf.FixedLenSequenceFeature([], dtype=tf.string)
                for feature_name in list_of_feature_names
            })

        # read ground truth labels
        labels = (tf.cast(
            tf.sparse_to_dense(contexts["labels"].values, (num_classes, ),
                               1,
                               validate_indices=False), tf.int32))

        num_features = len(list_of_feature_names)

        # loads different types of features in the feature_lists and concatenates them
        feature_matrices = [None] * num_features
        for feature_index in range(num_features):
            feature_matrix, num_frames_in_this_feature = get_audio_feature_matrix(
                features[list_of_feature_names[feature_index]],
                list_of_feature_sizes[feature_index], max_frames,
                max_quantized_value, min_quantized_value)
            # add to the feature_matrices list
            feature_matrices[feature_index] = feature_matrix

        # concatenate different features
        audio_matrices = tf.concat(feature_matrices, 1)

        batch_video_ids, batch_audio_matrices, batch_labels = tf.train.batch(
            [
                tf.expand_dims(contexts["video_id"], 0),
                tf.expand_dims(audio_matrices, 0),
                tf.expand_dims(labels, 0)
            ],
            batch_size=1,
            capacity=1 * 3,
            num_threads=1)

        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())

        # create a Coordinator and run all QueueRunners
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        count_tfrecord = 0
        count_water_samples = 0
        count_non_water_samples = 0
        # indices representing water related classes..
        # These are identified as labels containing water samples
        # 288 - 297, 370 - 372, 374, 444 - 446, 448 - 456
        indices_of_water_classes = {
            288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 370, 371, 372,
            374, 444, 445, 446, 448, 449, 450, 451, 452, 453, 454, 455, 456
        }
        # Run the tensorflow session to read from the tfrecord files..
        try:
            while not coord.should_stop():
                video_id, audio_feature_matrix, label = sess.run(
                    [batch_video_ids, batch_audio_matrices, batch_labels])
                count_tfrecord = count_tfrecord + 1
                # print context
                # print('Context:')
                # print('video_id: {}'.format(video_id))
                # print('label: {}'.format(label))
                indices_of_classes_present = np.where(label == 1)[2]
                if any(x in indices_of_water_classes
                       for x in indices_of_classes_present):
                    count_water_samples = count_water_samples + 1
                else:
                    count_non_water_samples = count_non_water_samples + 1
                # print feature lists
                # print('\nFeature Lists:')
                # import numpy
                # numpy.set_printoptions(threshold=numpy.nan)
                # print('audio_feature_matrix: {}'.format(audio_feature_matrix))
        except tf.errors.OutOfRangeError:
            print("Done reading tfrecords")

        # print the count of tfrecord
        print('TFRecord count: {}'.format(count_tfrecord))
        print('Total count of water samples: {}'.format(count_water_samples))
        print('Total count of non-water samples: {}'.format(
            count_non_water_samples))
        # request to stop the threads
        coord.request_stop()
        # wait for the threads to stop
        coord.join(threads)
        sess.close()
        return count_tfrecord, count_water_samples, count_non_water_samples
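A hedged call sketch for the function above; the path, feature settings, and class count are assumptions (AudioSet-style values), not taken from the source:

total, water, non_water = read_and_return_stats(
    tfrecord_data_path="/path/to/audioset/*.tfrecord",  # placeholder path
    feature_names="audio_embedding",  # assumed frame-level feature name
    feature_sizes="128",              # assumed embedding size
    num_classes=527)                  # assumed number of classes
print(total, water, non_water)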
Example #24
File: train.py Project: yuhai-china/Y8M
def build_graph(reader,
                model,
                train_data_pattern,
                train_data_pattern2,
                train_data_pattern3,
                eval_data_pattern,
                label_loss_fn=losses.CrossEntropyLoss(),
                batch_size=1000,
                base_learning_rate=0.01,
                learning_rate_decay_examples=1000000,
                learning_rate_decay=0.95,
                optimizer_class=tf.train.AdamOptimizer,
                clip_gradient_norm=1.0,
                regularization_penalty=1,
                num_readers=1,
                num_epochs=None,
                l2_penalty=1e-8,
                gpu_only=1):
    """Creates the Tensorflow graph.

  This will only be called once in the life of
  a training model, because after the graph is created the model will be
  restored from a meta graph file rather than being recreated.

  Args:
    reader: The data file reader. It should inherit from BaseReader.
    model: The core model (e.g. logistic or neural net). It should inherit
           from BaseModel.
    train_data_pattern: glob path to the training data files.
    train_data_pattern2: glob path to additional training data files.
    train_data_pattern3: glob path to additional training data files.
    eval_data_pattern: glob path to the evaluation data files.
    label_loss_fn: What kind of loss to apply to the model. It should inherit
                from BaseLoss.
    batch_size: How many examples to process at a time.
    base_learning_rate: What learning rate to initialize the optimizer with.
    optimizer_class: Which optimization algorithm to use.
    clip_gradient_norm: Magnitude of the gradient to clip to.
    regularization_penalty: How much weight to give the regularization loss
                            compared to the label loss.
    num_readers: How many threads to use for I/O operations.
    num_epochs: How many passes to make over the data. 'None' means an
                unlimited number of passes.
  """
    # data files
    files1 = gfile.Glob(train_data_pattern)
    files2 = gfile.Glob(train_data_pattern2)
    files3 = gfile.Glob(train_data_pattern3)
    files = files1 + files2 + files3
    if not files:
        raise IOError("Unable to find training files. data_pattern='" +
                      train_data_pattern + "'.")
    logging.info("Total number of training files: %s + %s + %s =  %s.",
                 str(len(files1)), str(len(files2)), str(len(files3)),
                 str(len(files)))

    files4 = gfile.Glob(eval_data_pattern)
    logging.info("Total number of eval files: %s.", str(len(files4)))

    if FLAGS.fold == -1:
        validate_files = files4
        train_files = files
    else:
        validate_files = files[FLAGS.fold::5]
        train_files = [x for x in files if x not in validate_files]

    logging.info("train files: {}, first is: {}.".format(
        len(train_files), train_files[0].split('/')[-1]))
    logging.info("eval files: {}, first is: {}.".format(
        len(validate_files), validate_files[0].split('/')[-1]))

    # label weights for loss function. ugly hard coded for now.
    wgts_np = np.ones(FLAGS.truncated_num_classes)
    over_weight_labels = False
    if over_weight_labels:
        labels_to_overwgt = [
            38, 47, 49, 55, 72, 76, 86, 89, 93, 94, 95, 98, 99, 101, 102, 110,
            111, 113, 114, 115, 120, 121
        ]
        wgts_np[labels_to_overwgt] = 2.0
    wgts_4_lossfn = tf.constant(wgts_np, dtype=tf.float32)

    global_step = tf.Variable(0, trainable=False, name="global_step")
    restart_learning_rate = tf.Variable(base_learning_rate,
                                        trainable=False,
                                        name="restart_learning_rate")

    local_device_protos = device_lib.list_local_devices()
    gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
    num_gpus = len(gpus)

    if num_gpus > 0:
        logging.info("Using the following GPUs to train: " + str(gpus))
        num_towers = num_gpus
        device_string = '/gpu:%d'
    else:
        logging.info("No GPUs found. Training on CPU.")
        num_towers = 1
        device_string = '/cpu:%d'

    learning_rate = tf.train.exponential_decay(restart_learning_rate,
                                               global_step * batch_size *
                                               num_towers,
                                               learning_rate_decay_examples,
                                               learning_rate_decay,
                                               staircase=True)
    tf.summary.scalar('learning_rate', learning_rate)

    optimizer = optimizer_class(learning_rate)
    unused_video_id, model_input_raw, labels_batch, num_frames = (
        get_input_data_tensors(reader,
                               train_files,
                               batch_size=batch_size * num_towers,
                               num_readers=num_readers,
                               num_epochs=num_epochs))
    tf.summary.histogram("model/input_raw", model_input_raw)

    # model params
    # probabilities for keeping a neuron in a layer, assuming max 10 layers, below default value
    with tf.variable_scope("tower", reuse=True) as scope:
        layers_keep_probs = tf.Variable(
            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            trainable=False,
            name="layers_keep_probs")
    model_input = model_input_raw
    if FLAGS.apply_global_normalization:
        g_mean, g_std = model_utils.load_global_moments()
        g_inv_std = 1.0 / g_std
        global_mean = tf.constant(g_mean, dtype=tf.float32)
        # expand global mean to match new dimension and fill rest with zeros
        new_dim = tf.cast(model_input_raw.shape[1], tf.int32)
        zero_padding = tf.zeros(new_dim - tf.shape(global_mean), tf.float32)
        global_mean_padded = tf.concat([global_mean, zero_padding], 0)
        # expand global inv std to match new dimension and fill rest with ones
        global_inv_std = tf.constant(g_inv_std, dtype=tf.float32)
        one_padding = tf.ones(new_dim - tf.shape(global_inv_std), tf.float32)
        global_inv_std_padded = tf.concat([global_inv_std, one_padding], 0)
        # apply normalizations (can do both) if requested
        # global L2 normalization
        model_input = tf.multiply(tf.subtract(model_input, global_mean_padded),
                                  global_inv_std_padded)
    # regular L2 normalization
    if FLAGS.apply_batch_l2_normalization:
        feature_dim = len(model_input.get_shape()) - 1
        model_input = tf.nn.l2_normalize(model_input, feature_dim)

    tower_inputs = tf.split(model_input, num_towers)
    tower_labels = tf.split(labels_batch, num_towers)
    tower_num_frames = tf.split(num_frames, num_towers)
    tower_gradients = []
    tower_predictions = []
    tower_label_losses = []
    tower_reg_losses = []

    # eval graph - to monitor performance out of sample during training
    e_video_id, e_input_raw, e_labels_batch, e_num_frames = (
        get_input_data_tensors(reader,
                               validate_files,
                               batch_size=batch_size * num_towers,
                               num_readers=num_readers,
                               num_epochs=2 * num_epochs))
    e_input = e_input_raw
    if FLAGS.apply_global_normalization:
        e_input = tf.multiply(tf.subtract(e_input, global_mean_padded),
                              global_inv_std_padded)
    if FLAGS.apply_batch_l2_normalization:
        feature_dim = len(model_input.get_shape()) - 1
        e_input = tf.nn.l2_normalize(e_input, feature_dim)

    e_tower_inputs = tf.split(e_input, num_towers)
    e_tower_labels = tf.split(e_labels_batch, num_towers)
    e_tower_num_frames = tf.split(e_num_frames, num_towers)
    e_tower_predictions = []
    e_tower_layers_keep_probs = tf.Variable(
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        trainable=False,
        name="layers_keep_probs")
    logging.info(e_tower_inputs)
    # end eval
    for i in range(num_towers):
        # For some reason these 'with' statements can't be combined onto the same
        # line. They have to be nested.
        logging.info('For tower: ' + str(i))
        with tf.device(device_string % i):
            with tf.variable_scope("tower", reuse=True if i > 0 else None):
                with slim.arg_scope(
                        [slim.model_variable, slim.variable],
                        device="/cpu:0" if num_gpus != 1 else "/gpu:0"):
                    logging.info(layers_keep_probs)
                    result = model.create_model(
                        tower_inputs[i],
                        num_frames=tower_num_frames[i],
                        vocab_size=reader.num_classes,
                        labels=tower_labels[i],
                        layers_keep_probs=layers_keep_probs,
                        l2_penalty=l2_penalty,
                        is_training=True)
                    for variable in slim.get_model_variables():
                        logging.info(variable)
                        tf.summary.histogram(variable.op.name, variable)

                    # create shadow moving average model variables
                    if FLAGS.use_ema:
                        model_vars = list(slim.get_model_variables())
                        ema = tf.train.ExponentialMovingAverage(
                            decay=1.0 - 1.0 / FLAGS.ema_halflife)
                        ema_op = ema.apply(model_vars)
                        logging.info("model_vars:")
                        logging.info(" || ".join([str(x) for x in model_vars]))
                        ema_vars = [ema.average(x) for x in model_vars]
                        ema_vars_pair_dict = {
                            ema.average_name(x): x.op.name
                            for x in model_vars
                        }
                        logging.info("ema_vars_pair_dict:")
                        for x, y in ema_vars_pair_dict.items():
                            logging.info(x + ': ' + y)
                        for v in ema_vars:
                            tf.summary.histogram(v.op.name, v)
                        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_op)
                        tf.add_to_collection("ema_op", ema_op)

                    predictions = result["predictions"]
                    tower_predictions.append(predictions)

                    if "loss" in result.keys():
                        label_loss = result["loss"]
                    else:
                        label_loss = label_loss_fn.calculate_loss(
                            predictions, tower_labels[i], FLAGS.loss_epsilon)

                    if "regularization_loss" in result.keys():
                        reg_loss = result["regularization_loss"]
                    else:
                        reg_loss = tf.constant(0.0)

                    reg_losses = tf.losses.get_regularization_losses()
                    if reg_losses:
                        reg_loss += tf.add_n(reg_losses)

                    tower_reg_losses.append(reg_loss)

                    # Adds update_ops (e.g., moving average updates in batch normalization) as
                    # a dependency to the train_op.
                    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                    if "update_ops" in result.keys():
                        update_ops += result["update_ops"]
                    if update_ops:
                        with tf.control_dependencies(update_ops):
                            barrier = tf.no_op(name="gradient_barrier")
                            with tf.control_dependencies([barrier]):
                                label_loss = tf.identity(label_loss)

                    tower_label_losses.append(label_loss)

                    # Incorporate the L2 weight penalties etc.
                    final_loss = regularization_penalty * reg_loss + label_loss
                    gradients = optimizer.compute_gradients(
                        final_loss, colocate_gradients_with_ops=False)
                    tower_gradients.append(gradients)

                    # eval ops
                    logging.info("eval ops")
                    e_result = model.create_model(
                        e_tower_inputs[i],
                        num_frames=e_tower_num_frames[i],
                        vocab_size=reader.num_classes,
                        labels=e_tower_labels[i],
                        layers_keep_probs=e_tower_layers_keep_probs,
                        l2_penalty=l2_penalty,
                        is_training=False)

                    e_predictions = e_result["predictions"]
                    e_tower_predictions.append(e_predictions)
                    # end eval ops

    label_loss = tf.reduce_mean(tf.stack(tower_label_losses))
    tf.summary.scalar("label_loss", label_loss)
    if regularization_penalty != 0:
        reg_loss = tf.reduce_mean(tf.stack(tower_reg_losses))
        tf.summary.scalar("reg_loss", reg_loss)
    merged_gradients = utils.combine_gradients(tower_gradients)

    if clip_gradient_norm > 0:
        with tf.name_scope('clip_grads'):
            merged_gradients = utils.clip_gradient_norms(
                merged_gradients, clip_gradient_norm)

    train_op = optimizer.apply_gradients(merged_gradients,
                                         global_step=global_step)

    tf.add_to_collection("global_step", global_step)
    tf.add_to_collection("restart_learning_rate", restart_learning_rate)
    tf.add_to_collection("layers_keep_probs", layers_keep_probs)
    tf.add_to_collection("loss", label_loss)
    tf.add_to_collection("predictions", tf.concat(tower_predictions, 0))
    tf.add_to_collection("input_batch_raw", model_input_raw)
    tf.add_to_collection("input_batch", model_input)
    tf.add_to_collection("num_frames", num_frames)
    tf.add_to_collection("labels", tf.cast(labels_batch, tf.float32))
    tf.add_to_collection("train_op", train_op)
    #tf.add_to_collection("ema_op", ema_op)

    # add eval graph
    e_label_loss = label_loss_fn.calculate_loss(
        tf.concat(e_tower_predictions, 0), e_labels_batch, FLAGS.loss_epsilon)
    tf.summary.scalar("e_label_loss", e_label_loss)

    tf.add_to_collection("e_predictions", tf.concat(e_tower_predictions, 0))
    tf.add_to_collection("e_labels", tf.cast(e_labels_batch, tf.float32))
    tf.add_to_collection("e_loss", e_label_loss)
Example #25
File: main.py  Project: thefunkjunky/minigo
def train_dir(chunk_dir: 'Directory where training chunks are.',
              model_save_path: 'Where to export the completed generation.'):
    tf_records = sorted(gfile.Glob(os.path.join(chunk_dir, '*.tfrecord.zz')))
    tf_records = tf_records[-1 * (WINDOW_SIZE // EXAMPLES_PER_RECORD):]

    train(tf_records, model_save_path)
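
The slice keeps only enough of the newest chunk files to cover one training window. With purely illustrative constants (the real WINDOW_SIZE and EXAMPLES_PER_RECORD are defined elsewhere in the project), the arithmetic looks like this:

# Illustrative values only; the project defines the real constants.
WINDOW_SIZE = 125000          # examples the model should train over
EXAMPLES_PER_RECORD = 2500    # examples stored in each .tfrecord.zz chunk

num_chunks = WINDOW_SIZE // EXAMPLES_PER_RECORD   # 50 newest chunks are kept
all_chunks = sorted("chunk-%04d.tfrecord.zz" % i for i in range(200))
recent_chunks = all_chunks[-num_chunks:]          # same slice as tf_records above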
Example #26
File: fsdb.py  Project: thefunkjunky/minigo
def game_counts(n_back=20):
    """Prints statistics for the most recent n_back models"""
    for _, model_name in get_models()[-n_back:]:
        games = gfile.Glob(os.path.join(selfplay_dir(), model_name, '*.zz'))
        print(model_name, len(games))
Example #27
def get_games(model_name):
    return gfile.Glob(os.path.join(selfplay_dir(), model_name, '*.zz'))
Example #28
def get_input_data_tensors(reader, data_pattern, batch_size, num_readers=1):
  """Creates the section of the graph which reads the input data.

  Args:
    reader: A class which parses the input data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  with tf.name_scope("input"):
    if 'validate' in data_pattern:
      # use a fixed subset of validate files (the first 300 two-character suffixes;
      # the random selection is left commented out below)
      # note that validate file names differ on gcloud and locally, due to the
      # `curl` download command used to fetch them

      # gcloud
      if FLAGS.google_cloud:
        a_list = list(string.ascii_lowercase)
        A_list = list(string.ascii_uppercase)

        n_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        n_list = n_list + a_list + A_list

        result = []
        for a in n_list:
          for n in n_list:
              result.append(n + a)
        #random.seed(7)
        #random.shuffle(result)
        validate_file_nums = result[:300]

      else:
        # local
        results = []
        for i in range(3844):
            results.append(str(i).zfill(4))
        #random.seed(7)
        #random.shuffle(results)
        validate_file_nums = results[:300]

      validate_file_list = [FLAGS.input_data_pattern.split('*')[0]
                            + x + '.tfrecord' for x in validate_file_nums]
      files = validate_file_list
    else:
      files = gfile.Glob(data_pattern)

    if not files:
      raise IOError("Unable to find input files. data_pattern='" +
                    data_pattern + "'")
    logging.info("number of input files: " + str(len(files)))
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=1, shuffle=False)
    examples_and_labels = [
        reader.prepare_reader(filename_queue) for _ in range(num_readers)
    ]

    input_data_dict = (
        tf.train.batch_join(
            examples_and_labels,
            batch_size=batch_size,
            allow_smaller_final_batch=True,
            enqueue_many=True))
    video_id_batch = input_data_dict["video_ids"]
    video_batch = input_data_dict["video_matrix"]
    num_frames_batch = input_data_dict["num_frames"]
    return video_id_batch, video_batch, num_frames_batch
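
The nested loops in the gcloud branch above enumerate every two-character suffix over the digits, lowercase, and uppercase letters (varying the first character fastest), then keep the first 300. An equivalent, more compact construction is sketched below for reference; it is not part of the original snippet:

import string

chars = string.digits + string.ascii_lowercase + string.ascii_uppercase
# The inner loop varies the first character, so the order is "00", "10", ..., "Z0", "01", ...
suffixes = [n + a for a in chars for n in chars]
validate_file_nums = suffixes[:300]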
Example #29
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=1024,
                                 num_readers=1):
  """Creates the section of the graph which reads the evaluation data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    if FLAGS.image_server:
        files = ["/imatge/dsuris/documents/validationdata/yt8m_video_level/validate15.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validate3w.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validate4r.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validate8l.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validate9_.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validatea0.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateAG.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateay.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateBE.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateCH.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateDH.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateel.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateet.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateFk.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateFp.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateHW.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateI4.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateiB.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validatek4.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validateks.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validatelt.tfrecord",
                 "/imatge/dsuris/documents/validationdata/yt8m_video_level/validatem1.tfrecord"]
    else:
        files = gfile.Glob(data_pattern)

    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = [
        reader.prepare_reader(filename_queue, batch_size) for _ in range(num_readers)
    ]
    return tf.train.batch_join(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
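
Because the filename queue above runs for a single epoch, an evaluation driver can simply run the batched op until the queue raises tf.errors.OutOfRangeError. A minimal sketch under that assumption; the reader object, session setup, and batch counting are illustrative and not part of the original snippet:

import tensorflow as tf

def run_one_eval_pass(reader, data_pattern):
    # The structure of eval_batch depends on the reader; here it is only run, not unpacked.
    eval_batch = get_input_evaluation_tensors(reader, data_pattern, batch_size=1024)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())  # backs the num_epochs counter
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        num_batches = 0
        try:
            while not coord.should_stop():
                sess.run(eval_batch)
                num_batches += 1
        except tf.errors.OutOfRangeError:
            print("Finished one evaluation pass over %d batches." % num_batches)
        finally:
            coord.request_stop()
            coord.join(threads)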
Example #30
def get_pbs():
    all_pbs = gfile.Glob(os.path.join(models_dir(), '*.pb'))
    return all_pbs