Example #1
import readers
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
from tensorflow.python.client import device_lib
import utils
import model_utils

FLAGS = flags.FLAGS

if __name__ == "__main__":
    # Dataset flags.
    flags.DEFINE_string("train_dir", "/tmp/yt8m_model/",
                        "The directory to save the model files in.")
    # There are three data-pattern variables in case the data are scattered
    # across multiple hard drives. On a single machine this helps with IO.
    flags.DEFINE_string(
        "train_data_pattern", "",
        "File glob for the training dataset. If the files refer to Frame Level "
        "features (i.e. tensorflow.SequenceExample), then set --reader_type "
        "format. The (Sequence)Examples are expected to have 'rgb' byte array "
        "sequence feature as well as a 'labels' int64 context feature.")
    flags.DEFINE_string("train_data_pattern2", "",
                        "additional training dataset.")
    flags.DEFINE_string("train_data_pattern3", "",
                        "additional training dataset.")
    flags.DEFINE_string("eval_data_pattern", "",
                        "File glob for the evaluation dataset.")
    flags.DEFINE_string("feature_names", "mean_rgb", "Name of the feature "
Example #2
from deepvariant.protos import deepvariant_pb2

_ALLOW_EXECUTION_HARDWARE = [
    'auto',  # Default, no validation.
    'cpu',  # Don't use accelerators, even if available.
    'accelerator',  # Must be hardware acceleration or an error will be raised.
]

# The number of digits past the decimal point that genotype likelihoods are
# rounded to, for numerical stability.
_GL_PRECISION = 10

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'examples', None,
    'Required. tf.Example protos containing DeepVariant candidate variants in '
    'TFRecord format, as emitted by make_examples.')
flags.DEFINE_string(
    'outfile', None,
    'Required. Destination path where we will write output candidate variants '
    'with additional likelihood information in TFRecord format of '
    'CallVariantsOutput protos.')
flags.DEFINE_string(
    'checkpoint', None,
    'Required. Path to the TensorFlow model checkpoint to use to evaluate '
    'candidate variant calls.')
flags.DEFINE_integer(
    'batch_size', 512,
    'Number of candidate variant tensors to batch together during inference. '
    'Larger batches use more memory but are more computationally efficient.')
flags.DEFINE_integer('max_batches', None,
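The excerpt is truncated mid-definition. Since several of its flags are documented as 'Required.' with a None default, a common companion step (assuming tf.flags is absl.flags, as in TF >= 1.5; this line is not shown in the excerpt) is to mark them required so parsing fails fast:

from tensorflow import flags

flags.DEFINE_string('examples', None,
                    'Required. TFRecord of candidate variants.')
# Parsing now raises an error if --examples is left unset.
flags.mark_flag_as_required('examples')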
Example #3
import readers
import frame_level_models
import video_level_models
import eval_util
from tensorflow.python.lib.io import file_io
from tensorflow import app
from tensorflow import logging
from tensorflow import flags
from tensorflow import gfile
from datetime import datetime
import tensorflow.contrib.slim as slim
from tensorflow.python import pywrap_tensorflow

FLAGS = flags.FLAGS

flags.DEFINE_string("Ensemble_Models", "./",
                    "the directory to store models for ensemble.")
flags.DEFINE_string("ensemble_model_path", None,
                    "the files to store models for ensemble.")
flags.DEFINE_string("ensemble_output_path", None,
                    "the files to store ensembled models.")
flags.DEFINE_string("eval_data_pattern", "", "")
flags.DEFINE_integer("num_readers", 8, "")
flags.DEFINE_integer("batch_size", 128, "")
flags.DEFINE_integer("top_k", 20, "")
flags.DEFINE_boolean("run_once", True, "")
flags.DEFINE_boolean("restore_once", False, "restore checkpoint once")
flags.DEFINE_integer("random_seed", 666, "")
flags.DEFINE_integer("tile_num", 10, "the number of sample copies")
tf.set_random_seed(FLAGS.random_seed)

Example #4
contents, points = tool.loading_rdata(data_path)
contents = tool.cut(contents, cut=2)

# transform documents into vectors
max_document_length = 200
x, vocabulary, vocab_size = tool.make_input(contents, max_document_length)
print('Vocabulary size: %s' % vocab_size)
y = tool.make_output(points, threshold=2.5)

# divide dataset into train/test set
x_train, x_test, y_train, y_test = tool.divide(x, y, train_prop=0.8)

# Model Hyperparameters
flags.DEFINE_integer("embedding_dim", 128,
                     "Dimensionality of embedded vector (default: 128)")
flags.DEFINE_string("filter_sizes", "3,4,5",
                    "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 128,
                     "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.5,
                   "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.1,
                   "L2 regularization lambda (default: 0.0)")

# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_integer("num_epochs", 10,
                     "Number of training epochs (default: 200)")
flags.DEFINE_integer(
    "evaluate_every", 100,
    "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 100,
Example #5
import readers
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.lib.io import file_io
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
from tensorflow.python.client import device_lib
import utils

FLAGS = flags.FLAGS

if __name__ == "__main__":
    # Dataset flags.
    flags.DEFINE_string("train_dir", "/tmp/yt8m_model/",
                        "The directory to save the model files in.")
    flags.DEFINE_string(
        "train_data_pattern", "",
        "File glob for the training dataset. If the files refer to Frame Level "
        "features (i.e. tensorflow.SequenceExample), then set --reader_type "
        "format. The (Sequence)Examples are expected to have 'rgb' byte array "
        "sequence feature as well as a 'labels' int64 context feature.")
    flags.DEFINE_string("feature_names", "mean_rgb", "Name of the feature "
                        "to use for training.")
    flags.DEFINE_string("feature_sizes", "1024",
                        "Length of the feature vectors.")

    # Model flags.
    flags.DEFINE_bool(
        "frame_features", False,
        "If set, then --train_data_pattern must be frame-level features. "
Example #6
import os
import time
from tensorflow import app
from tensorflow import flags
from urllib.parse import quote

FLAGS = flags.FLAGS

# In OpenCV3.X, this is available as cv2.CAP_PROP_POS_MSEC
# In OpenCV2.X, this is available as cv2.cv.CV_CAP_PROP_POS_MSEC
CAP_PROP_POS_MSEC = 0
IMG_FORMATS = ['.jpg', '.jpeg', '.png', '.JPG', '.JPEG', '.PNG']

if __name__ == '__main__':
    # Required flags for input and output.
    flags.DEFINE_string(
        'output_tfrecords_file', None,
        'File containing tfrecords will be written at this path.')
    flags.DEFINE_string(
        'input_videos_csv', None,
        'CSV file with lines "<video_file>,<labels>", where '
        '<video_file> must be a path of a video and <labels> '
        'must be an integer list joined with semi-colon ";"')
    # Optional flags.
    flags.DEFINE_string(
        'model_dir', os.path.join(os.getenv('HOME'), 'yt8m'),
        'Directory to store model files. It defaults to ~/yt8m')

    # The following flags are set to match the YouTube-8M dataset format.
    flags.DEFINE_integer('frames_per_second', 1,
                         'This many frames per second will be processed')
    flags.DEFINE_string(
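A sketch of how the CAP_PROP_POS_MSEC constant above is typically used to sample frames at --frames_per_second (cv2 assumed; the helper name is illustrative):

import cv2

CAP_PROP_POS_MSEC = 0  # cv2.CAP_PROP_POS_MSEC in OpenCV 3.x

def iter_frames(video_file, frames_per_second=1):
    capture = cv2.VideoCapture(video_file)
    ms = 0.0
    while True:
        capture.set(CAP_PROP_POS_MSEC, ms)  # seek to the next sample point
        has_frame, frame = capture.read()
        if not has_frame:
            break
        yield frame
        ms += 1000.0 / frames_per_second
    capture.release()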
Example #7
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''

import numpy as np
import random
import tensorflow as tf
from tensorflow import flags

from evaluation.evaluator import Evaluator
from learning_baseline.feature_based.build_prediction import BuildPrediction, BuildPredictions
from learning_baseline.feature_based.input import Dictionary, ReadExamples, GetInputPlaceholders, GetFeedDict, ReadQuestionAnnotations
from learning_baseline.feature_based.graph import GetLogits, GetVariables
from utils.squad_utils import ReconstructStrFromSpan

FLAGS = flags.FLAGS
flags.DEFINE_string('input-articles', 'dataset/dev-annotatedpartial.proto', '')
flags.DEFINE_string('input-features', 'dataset/dev-featuresbucketized.proto',
                    '')
flags.DEFINE_string('input-featuredict',
                    'dataset/featuredictbucketized-25000.proto', '')
flags.DEFINE_integer('min-articles', None, '')

if __name__ == '__main__':
    dictionary = Dictionary(FLAGS.input_featuredict)
    feature_index = dictionary.GetIndex('Dep Path NN - conj -> NN')

    examples = ReadExamples(FLAGS.input_features, dictionary,
                            FLAGS.min_articles)
    question_annotations = ReadQuestionAnnotations(FLAGS.input_articles)

    for example in examples:
Example #8
from tensorflow import flags
import utils.yaml_config as yaml_config
import yaml

FLAGS_0 = flags.FLAGS

# read additional params from console
flags.DEFINE_string(
    "model_dir", "/models/graph_models/models_something_something_new/tmp_model", "")
flags.DEFINE_string("config_file", "./configs/smt_config.yaml", "")
flags.DEFINE_integer("rand_no", 1241322, "")

# read params from config file
def read_params(save=True):
    FLAGS = yaml_config.read_config(FLAGS_0.config_file)

    # add console params to config params
    FLAGS.model_dir = FLAGS_0.model_dir
    FLAGS.rand_no = FLAGS_0.rand_no

    # save git info
    FLAGS.git_info = yaml_config.get_git_info()
    FLAGS.num_eval_clips = FLAGS.num_eval_spatial_clips * FLAGS.num_eval_temporal_clips
    name_yaml = FLAGS.model_dir + f'/config_{FLAGS.rand_no}.yaml'
    print(name_yaml)
    if save:
        # save current config file
        with open(name_yaml, 'w') as outfile:
            yaml.dump(yaml_config.namespace_to_dict(FLAGS),
                      outfile, default_flow_style=False)
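A hypothetical call site (the excerpt ends before read_params returns; this assumes it returns the merged FLAGS namespace):

if __name__ == "__main__":
    # Merge console flags into the YAML config without re-saving it.
    FLAGS = read_params(save=False)
    print(FLAGS.model_dir, FLAGS.rand_no)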
Example #9
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging

import eval_util
import losses
import readers
import utils
import numpy as np
import labels_autoencoder

FLAGS = flags.FLAGS

if __name__ == '__main__':
    flags.DEFINE_string("train_dir", "/tmp/yt8m_model/",
                        "The directory to load the model files from.")
    flags.DEFINE_string("model_checkpoint_path", "",
                        "The file path to load the model from.")
    flags.DEFINE_string("output_file", "",
                        "The file to save the predictions to.")
    flags.DEFINE_string(
        "input_data_pattern", "",
        "File glob defining the evaluation dataset in tensorflow.SequenceExample "
        "format. The SequenceExamples are expected to have an 'rgb' byte array "
        "sequence feature as well as a 'labels' int64 context feature.")

    # Model flags.
    flags.DEFINE_bool(
        "frame_features", False,
        "If set, then --eval_data_pattern must be frame-level features. "
        "Otherwise, --eval_data_pattern must be aggregated video-level "
Example #10
import models
import tensorflow as tf
import utils

from tensorflow import flags
import tensorflow.contrib.slim as slim

FLAGS = flags.FLAGS
flags.DEFINE_integer(
    "moe_num_mixtures", 2,
    "The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
flags.DEFINE_float("moe_l2", 1e-8, "L2 penalty for MoeModel.")
flags.DEFINE_integer("moe_low_rank_gating", -1,
                     "Low rank gating for MoeModel.")
flags.DEFINE_bool("moe_prob_gating", False, "Prob gating for MoeModel.")
flags.DEFINE_string("moe_prob_gating_input", "prob",
                    "input Prob gating for MoeModel.")

flags.DEFINE_integer("num_supports", 8, "num_supports for chain.")

#--------------

flags.DEFINE_integer("deep_chain_layers", 3,
                     "The number of layers used for DeepChainModel")
flags.DEFINE_integer("deep_chain_relu_cells", 128,
                     "The number of relu cells used for DeepChainModel")
flags.DEFINE_string(
    "deep_chain_relu_type", "relu",
    "The type of relu cells used for DeepChainModel (options are elu and relu)"
)

flags.DEFINE_bool("deep_chain_use_length", False,
Example #11
import tensorflow as tf
from GAN_cond_G_D_vae_gen import networks
from gan_tf_examples.mnist import util
from tensorflow import flags
import tensorflow.contrib.gan as tfgan
from gan_tf_examples.mnist.data_provider import provide_data
from VAE.variational_autoencoder import VAE


flags.DEFINE_integer('batch_size', 64, 'The number of images in each batch.')

flags.DEFINE_string('train_log_dir', 'lul',
                    'Directory where to write event logs.')

flags.DEFINE_string('dataset_dir', "../data/mnist", 'Location of data.')

flags.DEFINE_string('vae_checkpoint_folder', None, 'Location of the saved VAE model')



flags.DEFINE_integer('max_number_of_steps', 20000,
                     'The maximum number of gradient steps.')

flags.DEFINE_string(
    'gan_type', 'unconditional',
    'Either `unconditional`, `conditional`, or `infogan`.')

flags.DEFINE_integer(
    'grid_size', 8, 'Grid size for image visualization.')

flags.DEFINE_integer(
Example #12
import video_level_models
import readers
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
import utils

FLAGS = flags.FLAGS

if __name__ == "__main__":
  # Dataset flags.
  flags.DEFINE_string("train_dir", "/tmp/yt8m_model/",
                      "The directory to load the model files from. "
                      "The tensorboard metrics files are also saved to this "
                      "directory.")
  flags.DEFINE_string(
      "eval_data_pattern", "",
      "File glob defining the evaluation dataset in tensorflow.SequenceExample "
      "format. The SequenceExamples are expected to have an 'rgb' byte array "
      "sequence feature as well as a 'labels' int64 context feature.")

  # Other flags.
  flags.DEFINE_integer("batch_size", 1024,
                       "How many examples to process per batch.")
  flags.DEFINE_integer("num_readers", 8,
                       "How many threads to use for reading input files.")
  flags.DEFINE_boolean("run_once", False, "Whether to run eval only once.")
  flags.DEFINE_integer("top_k", 20, "How many predictions to output per video.")
Example #13
Specifically, this checks whether the provided record sizes are consistent and
that the file does not end in the middle of a record. It does not verify the
CRCs.
"""

import struct
import tensorflow as tf

from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_data_pattern", "",
                    "File glob defining for the TFRecords files.")


def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  logging.info(FLAGS.input_data_pattern)
  paths = gfile.Glob(FLAGS.input_data_pattern)
  logging.info("Found %s files.", len(paths))
  for path in paths:
    with gfile.Open(path, "rb") as f:
      first_read = True
      while True:
        length_raw = f.read(8)
        if not length_raw and first_read:
          logging.fatal("File %s has no data.", path)
          break
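For reference, each record in a TFRecord file is a little-endian uint64 length, a 4-byte masked CRC32 of the length, the payload, and a 4-byte masked CRC32 of the payload. A sketch of a reader that walks that layout, skipping the CRCs just as the docstring above says:

import struct

def iter_tfrecords(f):
    """Yields raw record payloads from an open TFRecord file object."""
    while True:
        length_raw = f.read(8)      # uint64 record length
        if len(length_raw) < 8:
            return                  # clean end of file
        length, = struct.unpack("<Q", length_raw)
        f.read(4)                   # masked CRC32 of the length (not verified)
        yield f.read(length)        # the serialized record
        f.read(4)                   # masked CRC32 of the payload (not verified)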
Example #14
flags.DEFINE_boolean('use_cuda', True, 'use CUDA')

flags.DEFINE_float('meta_lr', None, 'meta-optimization learning rate')
flags.DEFINE_float('exp_decay', 0.9, 'exp decay constant')

flags.DEFINE_float('beta1', 0.9, 'adam beta1')
flags.DEFINE_float('beta2', 0.999, 'adam beta2')
flags.DEFINE_float('adam_eps', 1e-8, 'adam eps')

flags.DEFINE_float('mnist_momentum', 0.9, 'momentum of learner on mnist')

flags.DEFINE_float('warm_start_lr', 0.1, 'warm start learning rate')

flags.DEFINE_integer('warm_start_steps', 50, 'warm start steps')

flags.DEFINE_string('optimizer', 'sgd', 'sgd adam or mom')
flags.DEFINE_float('momentum', 0.9, 'momentum for SGD')

flags.DEFINE_integer('batch_size', 100, 'batch size')

flags.DEFINE_float('init_lr', 0.01, 'init lr')
flags.DEFINE_float('init_decay', 0.1, 'init decay')

flags.DEFINE_float('norm_clip', -1.0, 'clip grads to this norm before doing RT')
flags.DEFINE_float('post_clip', 1.0, 'clip before applying grads')

flags.DEFINE_integer('train_horizon', 10, 'truncated horizon of problem')
flags.DEFINE_integer('test_horizon', 10, 'full horizon of problem')
flags.DEFINE_integer('test_frequency', 5, 'test freq')
flags.DEFINE_integer('calibrate_frequency', 5, 'calibrate freq')
flags.DEFINE_boolean('compute_penalty', False, 'penalize RT due to multiple '
Example #15
import tensorflow as tf
import numpy as np
import models
from tensorflow.examples.tutorials.mnist import input_data
from IPython import embed
from tensorflow import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("log_dir", "./logs/default",
                    "default summary/checkpoint directory")
flags.DEFINE_float("learning_rate", 0.01, "base learning rate")
flags.DEFINE_string("model", "DNN", "model name")
flags.DEFINE_string("optimizer", "GradientDescentOptimizer",
                    "kind of optimizer to use.")
flags.DEFINE_integer("batch_size", 1024, "default batch size.")
flags.DEFINE_integer("max_steps", 10000, "number of max iteration to train.")


def main(_):
    mnist = input_data.read_data_sets("./data", one_hot=True)

    # define model inputs: image and ground-truth label
    model_inputs = tf.placeholder(dtype=tf.float32, shape=[None, 784])
    labels = tf.placeholder(dtype=tf.float32, shape=[None, 10])

    model = getattr(models, FLAGS.model, None)()
    predictions = model.create_model(model_inputs)

    # define cross entropy loss term
    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels,
                                           logits=predictions)
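The excerpt stops at the loss. A hedged continuation in the same TF1 style would look up the optimizer class named by --optimizer and build the train op:

    # Hypothetical continuation: resolve the optimizer class by name.
    optimizer = getattr(tf.train, FLAGS.optimizer)(FLAGS.learning_rate)
    train_op = optimizer.minimize(loss)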
Example #16
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging

import readers
import utils

#%%
FLAGS = flags.FLAGS

if __name__ == '__main__':
    flags.DEFINE_string(
        "input_data_pattern", "",
        "File glob defining the evaluation dataset in tensorflow.SequenceExample "
        "format. The SequenceExamples are expected to have an 'rgb' byte array "
        "sequence feature as well as a 'labels' int64 context feature.")
    flags.DEFINE_string("input_data_pattern2", "", "Additional data files.")
    flags.DEFINE_string("input_data_pattern3", "", "More data files.")

    flags.DEFINE_string("output_file", "",
                        "The file to save the l2 params to.")

    # Model flags.
    flags.DEFINE_bool(
        "frame_features", False,
        "If set, then --eval_data_pattern must be frame-level features. "
        "Otherwise, --eval_data_pattern must be aggregated video-level "
        "features. The model must also be set appropriately (i.e. to read 3D "
        "batches VS 4D batches.")
Example #17
import eval_util
import losses
import ensemble_level_models
import readers
import tensorflow as tf
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
import utils

FLAGS = flags.FLAGS

if __name__ == "__main__":
    # Dataset flags.
    flags.DEFINE_string("model_checkpoint_path", "",
                        "The file to load the model files from. ")
    flags.DEFINE_string("train_dir", "/tmp/yt8m/",
                        "The directory to write the result in. ")
    flags.DEFINE_string(
        "eval_data_patterns", "",
        "File globs defining the evaluation dataset in tensorflow.SequenceExample format."
    )
    flags.DEFINE_string("input_data_pattern", None,
                        "File globs for original model input.")
    flags.DEFINE_string("feature_names", "predictions", "Name of the feature "
                        "to use for training.")
    flags.DEFINE_string("feature_sizes", "3862",
                        "Length of the feature vectors.")

    # Model flags.
    flags.DEFINE_string("model", "LinearRegressionModel",
Example #18
import sys
sys.path.append("..")
import config.Config
import models
import tensorflow as tf
import numpy as np
import os
import codecs

from tensorflow import app
from tensorflow import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('gpu', '7', 'gpu will be used')
flags.DEFINE_string('data_path', '../benchmarks/kg_100k/', 'path of data')
flags.DEFINE_string('save_path', '../res/kg_100k/transe', 'path of save model and data')

# hyperparameter
flags.DEFINE_integer('threads', 8, 'work threads')
flags.DEFINE_integer('epochs', 1000, 'train epochs')
flags.DEFINE_integer('batch_size', 128, 'batch size')
flags.DEFINE_integer('embed_dim', 300, 'embedding dimension')
flags.DEFINE_string('opt', 'SGD', 'optimization method')

def main(_):
    cuda_list = FLAGS.gpu
    data_path = FLAGS.data_path
    save_path = FLAGS.save_path
    if not os.path.exists(save_path):
        os.makedirs(save_path)
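cuda_list is otherwise unused in the visible lines; a plausible (hypothetical) next step pins the visible GPUs from the flag:

    os.environ['CUDA_VISIBLE_DEVICES'] = cuda_list  # e.g. '7', the flag default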
Example #19
import MDLSTM.datareader as readers
import MDLSTM.export_model as export_model
import MDLSTM.eval_util as eval_util
import MDLSTM.model as models

import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow import app, flags, gfile, logging
import MDLSTM.utils
from MDLSTM.utils import task_as_string

FLAGS = flags.FLAGS

if __name__ == "__main__":
    # Define dataset flags
    flags.DEFINE_string("train_dir", "./mdlstm_train/", "Directory to save models in")

    flags.DEFINE_string("train_data_pattern", "", "File glob for the training dataset")

    flags.DEFINE_string("test_data_pattern", "", "File Glob for the test set.")

    flags.DEFINE_string("feature_names", "mean_rgb", "Name of the feature to use for training")

    flags.DEFINE_string("feature_sizes", "1024", "Length of feature vectors")

    # Model Flags
    flags.DEFINE_bool("slice_features", True, "If set, the input should have 4 dimensions.")

    flags.DEFINE_string("vocab_path", "vocabulary.txt", "Which vocab to use in order to help prediction.")

    flags.DEFINE_bool("start_new_model", False, "If set, this will not resume from a checkpoint and will instead create a new model")
Example #20
import os
import traceback
import time
import random as rd
import glob
import tensorflow.contrib.slim as slim
from tensorflow import flags
from tensorflow import app
from tensorflow import logging
import math
os.environ['CUDA_VISIBLE_DEVICES'] = '1,2'

FLAGS = flags.FLAGS

if __name__ == '__main__':
    flags.DEFINE_string(
        'data_dir',
        '/data1/sina_recmd/simba/trunk/src/content_analysis/douyin/code/data/douyin_tfrecord',
        '')
    flags.DEFINE_string(
        'train_dir',
        '/data1/sina_recmd/simba/trunk/src/content_analysis/douyin/code/data/cdssm_output',
        '')
    flags.DEFINE_float('dropout_keep_prob', 0.9, 'dropout keep prob')
    flags.DEFINE_integer('batch_size', 50, 'batch size')
    flags.DEFINE_integer('NEG', 9, 'NEG size')

    flags.DEFINE_float('initial_lr', 0.001, '')
    flags.DEFINE_float('lr_decay_factor', 0.7, '')
    flags.DEFINE_integer('num_epochs_before_decay', 5, '')

    flags.DEFINE_float('l2_reg_lambda', 0.05, '')
    flags.DEFINE_integer('num_epochs', 30000, 'num_epochs')
Example #21
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging

import eval_util
import losses
import readers
import utils

FLAGS = flags.FLAGS

if __name__ == '__main__':
    # Input
    flags.DEFINE_string(
        "train_dir", "",
        "The directory to load the model files from. We assume "
        "that you have already run eval.py onto this, such that "
        "inference_model.* files already exist.")
    flags.DEFINE_string(
        "input_data_pattern", "",
        "File glob defining the evaluation dataset in tensorflow.SequenceExample "
        "format. The SequenceExamples are expected to have an 'rgb' byte array "
        "sequence feature as well as a 'labels' int64 context feature.")
    flags.DEFINE_string(
        "input_model_tgz", "",
        "If given, must be path to a .tgz file that was written "
        "by this binary using flag --output_model_tgz. In this "
        "case, the .tgz file will be untarred to "
        "--untar_model_dir and the model will be used for "
        "inference.")
    flags.DEFINE_string(
Example #22
import sys
from tensorflow import flags

FLAGS = flags.FLAGS

if __name__ == "__main__":
    flags.DEFINE_string("train_path", "",
                        "The directory where training files locates.")
    flags.DEFINE_string("candidates", "", "The candidate methods.")

if __name__ == "__main__":
    candidate_methods = map(lambda x: x.strip(),
                            FLAGS.candidates.strip().split(","))
    train_path = FLAGS.train_path
    output_path = ",".join(
        map(lambda x: "%s/%s/*.tfrecord" % (train_path, x), candidate_methods))
    sys.stdout.write(output_path)
    sys.stdout.flush()
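A usage sketch (script name and paths are hypothetical): the script prints one comma-joined glob per candidate method, ready to pass to another binary's --train_data_pattern:

# python make_patterns.py --train_path=/data/yt8m --candidates="model_a,model_b"
# stdout: /data/yt8m/model_a/*.tfrecord,/data/yt8m/model_b/*.tfrecord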
Example #23
import test_util
import utils
import image_models
import readers
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging

FLAGS = flags.FLAGS

if __name__ == "__main__":
    # Dataset flags.
    flags.DEFINE_string("train_dir", "model",
                        "The directory to load the model files from.")
    flags.DEFINE_string("model_checkpoint_path", "",
                        "The file to load the model files from. ")
    flags.DEFINE_string("output_file", "test.out",
                        "File that contains the csv predictions")
    flags.DEFINE_string("test_data_list", None,
                        "List that contains testing data path")
    flags.DEFINE_string("test_data_pattern", "test-data/*.tfrecord",
                        "Pattern for testing data path")
    flags.DEFINE_integer("image_width", 1918, "Width of the image.")
    flags.DEFINE_integer("image_height", 1280, "Height of the image.")
    flags.DEFINE_integer("image_channels", 3, "Channels of the image.")

    flags.DEFINE_string(
        "model", "BasicUNetModel",
        "Which architecture to use for the model. Models are defined "
Example #24
import csv
import six

from tensorflow import flags

from pydub import AudioSegment

import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
from subprocess import call

FLAGS = flags.FLAGS

if __name__ == '__main__':
    flags.DEFINE_string(
        'input_youtube_id_tsv',
        '/Users/julia/PSVA/data/output/balanced/youtube_balanced_train.txt',
        'TSV file with lines "<id>\t<start_time>\t<end_time>\t<label>", where '
        '<label> must be an integer list joined with semi-colon ";"')

    flags.DEFINE_string('output_dir', '/Users/julia/PSVA/data/output/balanced',
                        'where to save the wav file')


def main(unused_argv):
    print(FLAGS.input_youtube_id_tsv)

    f = open(FLAGS.output_dir + "/video_path_label.txt", "w+")
    i = 0

    for youtube_id, st_time, end_time, label in csv.reader(open(
            FLAGS.input_youtube_id_tsv),
Example #25
import models
import tensorflow as tf
import utils

from tensorflow import flags
import tensorflow.contrib.slim as slim

FLAGS = flags.FLAGS
# flags.DEFINE_integer(
#     "moe_num_mixtures", 2,
#     "The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
flags.DEFINE_float("moe_l2", 1e-8, "L2 penalty for MoeModel.")
flags.DEFINE_integer("moe_low_rank_gating", -1,
                     "Low rank gating for MoeModel.")
flags.DEFINE_bool("moe_prob_gating", True, "Prob gating for MoeModel.")
flags.DEFINE_string("moe_prob_gating_input", "prob",
                    "input Prob gating for MoeModel.")


class LogisticModel(models.BaseModel):
    """Logistic model with L2 regularization."""
    def create_model(self,
                     model_input,
                     vocab_size,
                     l2_penalty=1e-8,
                     **unused_params):
        """Creates a logistic model.

    Args:
      model_input: 'batch' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
Example #26
from tensorflow import flags
# Model Hyperparameters
flags.DEFINE_integer("embedding_dim", 50,
                     "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "45",
                    "Comma-separated filter sizes (default: '3,4,5')")
#flags.DEFINE_string("filter_sizes", "5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 65,
                     "Number of filters per filter size (default: 128)")
flags.DEFINE_integer("hidden_num", 128,
                     "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1,
                   "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.0006,
                   "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.0001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss", "point_wise", "loss function (default:point_wise)")
flags.DEFINE_integer('extend_feature_dim', 10, 'overlap_feature_dim')
# Training parameters
flags.DEFINE_integer("batch_size", 128, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False,
                     "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epochs", 30,
                     "Number of training epochs (default: 200)")
flags.DEFINE_integer(
    "evaluate_every", 500,
    "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500,
                     "Save model after this many steps (default: 100)")
Example #27
from tensorflow import flags

FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 30, "Number of frames per batch for DBoF.")
flags.DEFINE_bool("dbof_add_batch_norm", True,
                  "Adds batch normalization to the DBoF model.")
flags.DEFINE_bool(
    "sample_random_frames", True,
    "If true samples random frames (for frame level models). If false, a random"
    "sequence of frames is sampled instead.")
flags.DEFINE_integer("dbof_cluster_size", 8192,
                     "Number of units in the DBoF cluster layer.")
flags.DEFINE_integer("dbof_hidden_size", 1024,
                     "Number of units in the DBoF hidden layer.")
flags.DEFINE_string(
    "dbof_pooling_method", "max",
    "The pooling method used in the DBoF cluster layer. "
    "Choices are 'average' and 'max'.")
flags.DEFINE_string(
    "video_level_classifier_model", "MoeModel",
    "Some Frame-Level models can be decomposed into a "
    "generalized pooling operation followed by a "
    "classifier layer")
flags.DEFINE_integer("lstm_cells", 1024, "Number of LSTM cells.")
flags.DEFINE_integer("lstm_layers", 2, "Number of LSTM layers.")


class FrameLevelLogisticModel(models.BaseModel):
    def create_model(self, model_input, vocab_size, num_frames,
                     **unused_params):
        """Creates a model which uses a logistic classifier over the average of the
    frame-level features.
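A sketch of the averaging that docstring describes (TF1 ops; num_frames is the per-example count of valid frames, used so zero padding does not dilute the mean):

import tensorflow as tf

def average_frames(model_input, num_frames):
    # model_input: [batch, max_frames, num_features]; num_frames: [batch]
    frame_sum = tf.reduce_sum(model_input, axis=1)
    denominators = tf.expand_dims(tf.cast(num_frames, tf.float32), 1)
    return frame_sum / tf.maximum(denominators, 1.0)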
Example #28
import video_level_models
import readers
import tensorflow as tf
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
import utils

FLAGS = flags.FLAGS

if __name__ == "__main__":
    # Dataset flags.
    flags.DEFINE_string(
        "train_dir", "/tmp/yt8m_model/",
        "The directory to load the model files from. "
        "The tensorboard metrics files are also saved to this "
        "directory.")
    flags.DEFINE_string(
        "eval_data_pattern", "",
        "File glob defining the evaluation dataset in tensorflow.SequenceExample "
        "format. The SequenceExamples are expected to have an 'rgb' byte array "
        "sequence feature as well as a 'labels' int64 context feature.")
    flags.DEFINE_string("feature_names", "mean_rgb", "Name of the feature "
                        "to use for training.")
    flags.DEFINE_string("feature_sizes", "1024",
                        "Length of the feature vectors.")

    # Model flags.
    flags.DEFINE_bool(
        "frame_features", False,
Example #29
    'W': 186.07931,
    'V': 99.06841,
    'Y': 163.06333,
    'M(ox)': 147.035405,
    'groupCH3': 14.01565,
    'groupOH': 17.00274,
    'groupH': 1.007825,
    'groupH2O': 18.01057,
    'groupCH3CO': 42.01057,
    'groupO': 15.994915,
    'groupNH3': 17.02655}


FLAGS = flags.FLAGS
flags.DEFINE_string(
    'input_data',
    '',
    'Input data filepath.')
flags.DEFINE_string(
    'output_data_dir',
    '',
    'Output data directory.')
flags.DEFINE_bool(
    'clean_peptides',
    True,
    'True if peptide modifications are in [x] format.')
flags.DEFINE_string(
    'sequence_col',
    _MOD_SEQUENCE,
    'Modified sequence column name in the input file.')
flags.DEFINE_string(
    'charge_col',
Example #30
flags.DEFINE_integer('aln_match', 4,
                     'Match score (expected to be a non-negative score).')
flags.DEFINE_integer('aln_mismatch', 6,
                     'Mismatch score (expected to be a non-negative score).')
flags.DEFINE_integer(
    'aln_gap_open', 8, 'Gap open score (expected to be a non-negative score). '
    'Score for a gap of length g is -(gap_open + (g - 1) * gap_extend).')
flags.DEFINE_integer(
    'aln_gap_extend', 1,
    'Gap extend score (expected to be a non-negative score). '
    'Score for a gap of length g is -(gap_open + (g - 1) * gap_extend).')
flags.DEFINE_integer('aln_k', 23, 'k-mer size used to index target sequence.')
flags.DEFINE_float('aln_error_rate', .01, 'Estimated sequencing error rate.')
flags.DEFINE_string(
    'realigner_diagnostics', '',
    'Root directory where the realigner should place diagnostic output (such as'
    ' a dump of the DeBruijn graph, and a log of metrics reflecting the graph '
    'and realignment to the haplotypes). If empty, no diagnostics are output.'
)
flags.DEFINE_bool(
    'emit_realigned_reads', False,
    'If True, we will emit realigned reads if our realigner_diagnostics are '
    'also enabled.')

# Margin added to the reference sequence for the aligner module.
_REF_ALIGN_MARGIN = 20

# ---------------------------------------------------------------------------
# Set configuration settings.
# ---------------------------------------------------------------------------
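A quick check of the gap-scoring formula quoted in the flag help above, using the default values:

gap_open, gap_extend = 8, 1                   # the flag defaults
g = 3                                         # a gap of length 3
score = -(gap_open + (g - 1) * gap_extend)    # -> -10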