def __init__(self):
    try:
        flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
        flags.DEFINE_string('weights', './checkpoints/yolov3.tf', 'path to weights file')
        flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
        flags.DEFINE_integer('size', 416, 'resize images to')
        flags.DEFINE_string('image', './data/girl.png', 'path to input image')
        flags.DEFINE_string('tfrecord', None, 'tfrecord instead of image')
        flags.DEFINE_string('output', './output.jpg', 'path to output image')
        flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
    except flags.DuplicateFlagError:
        # Redefining an absl flag raises DuplicateFlagError, so repeated
        # instantiation is safe to ignore.
        print('Flags already defined; skipping redefinition.')
    self.yolo = YoloV3(classes=flags.FLAGS.num_classes)
    self.yolo.load_weights(flags.FLAGS.weights).expect_partial()
    # logging.info('weights loaded')
    self.class_names = [c.strip() for c in open(flags.FLAGS.classes).readlines()]
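# Usage sketch, assuming the class owning this __init__ is named Detector
# (a hypothetical name). FLAGS must be marked as parsed before any
# FLAGS.<name> access; calling the FLAGS object with argv does that, and
# known_only=True lets flags defined later (inside __init__) pass through.
import sys
from tensorflow.compat.v1 import flags

flags.FLAGS(sys.argv, known_only=True)
detector = Detector()  # hypothetical wrapper class containing the __init__ above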
    'T': 101.04768, 'W': 186.07931, 'V': 99.06841, 'Y': 163.06333,
    'M(ox)': 147.035405, 'groupCH3': 14.01565, 'groupOH': 17.00274,
    'groupH': 1.007825, 'groupH2O': 18.01057, 'groupCH3CO': 42.01057,
    'groupO': 15.994915, 'groupNH3': 17.02655
}

FLAGS = flags.FLAGS
flags.DEFINE_string('input_data', '', 'Input data filepath.')
flags.DEFINE_string('output_data_dir', '', 'Output data directory.')
flags.DEFINE_bool('clean_peptides', False,
                  'True if peptide modifications are in [x] format.')
flags.DEFINE_string('sequence_col', _MOD_SEQUENCE,
                    'Modified sequence column name in the input file.')
flags.DEFINE_string('charge_col', _CHARGE,
                    'Charge column name in the input file.')
flags.DEFINE_string('fragmentation_col', _FRAGMENTATION,
                    'Fragmentation column name in the input file.')
flags.DEFINE_string('analyzer_col', _MASS_ANALYZER,
                    'Mass analyzer column name in the input file.')


def generate_json_inputs(data, encoding):
    """Generates inputs to be stored into a JSON file."""
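# The residue-mass table above supports simple mass arithmetic. A minimal
# sketch, assuming the full dict (only its tail is visible here) covers all
# twenty residues and is passed in as `masses`:
def peptide_monoisotopic_mass(sequence, masses):
    """Monoisotopic peptide mass: sum of residue masses plus one water."""
    return sum(masses[aa] for aa in sequence) + masses['groupH2O']

# e.g. peptide_monoisotopic_mass('VTY', masses) combines the V, T and Y
# entries above with the groupH2O constant.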
"""Entry point for the training and testing jobs.""" # pylint: disable=invalid-name import csv import pickle import random from data_generator import DataGenerator from maml import MAML import numpy as np import tensorflow.compat.v1 as tf from tensorflow.compat.v1 import flags FLAGS = flags.FLAGS ## Dataset/method options flags.DEFINE_string('datasource', 'sinusoid', 'sinusoid or omniglot or miniimagenet or dclaw') flags.DEFINE_string('expt_number', '0', '1 or 2 etc') flags.DEFINE_string( 'expt_name', 'intershuffle', 'non_exclusive or intrashuffle or intershuffle or sin_noise') flags.DEFINE_string( 'dclaw_pn', '1', '1 or 2 or 3; dataset permutation number for dclaw. Does differnt train/val/test splits' ) flags.DEFINE_integer( 'num_classes', 5, 'number of classes used in classification (e.g. 5-way classification).') # oracle means task id is input (only suitable for sinusoid) flags.DEFINE_string('baseline', None, 'oracle, or None') ## Training options
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from tensorflow.compiler import vitis_vai
from dataset import get_images_infor_from_file, ImagenetSequence
from tensorflow.compat.v1 import flags
import tensorflow as tf
import numpy as np
import threading
import time

keras = tf.keras

# Get frozen ConcreteFunction
flags.DEFINE_string('input_graph', '', 'TensorFlow \'h5\' file to load.')
flags.DEFINE_string('eval_image_path', '/scratch/data/Imagenet/val_dataset',
                    'The directory where the eval images are put')
flags.DEFINE_integer('nthreads', 8, 'thread number')
flags.DEFINE_integer('batch_iter', 2000, 'eval iterations')
flags.DEFINE_string('mode', 'perf', 'normal or perf mode')
FLAGS = flags.FLAGS

filePath = "./words.txt"


def run_func():
    r = model(x[0])[0]
    fp = open(filePath, "r")
    data1 = fp.readlines()
    fp.close()
    result = tf.math.top_k(r, 5)
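    # A plausible continuation of run_func (an assumption, not this repo's
    # confirmed code): map the top-5 class indices back to the label strings
    # loaded from words.txt.
    for idx, score in zip(result.indices.numpy(), result.values.numpy()):
        print('%s: %.4f' % (data1[idx].strip(), score))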
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

normalizer = preprocessing.MaxAbsScaler()
audio_dir = os.path.join(os.getcwd(), 'music-data')
SONG_FN = 'dubstep.p'
# filenames = glob.glob(audio_dir + '/fma_small/' + '/*[0-9]/*')
with open(SONG_FN, 'rb') as f:
    filenames = pickle.load(f)

flags.DEFINE_integer('batch', 32, 'Batch size')
flags.DEFINE_integer('epochs', 10,
                     'Number of iterations to train on the entire dataset')
flags.DEFINE_integer('latent', 100, 'Dimensionality of the latent space')
flags.DEFINE_string('model_path', '.', 'Path to model checkpoint')
flags.DEFINE_string('output_dir', '.', 'Path to model checkpoints and logs')
flags.DEFINE_string('dtype', 'float32',
                    'Floating point data type of tensorflow graph')
flags.DEFINE_boolean('train', False, 'Train the music GAN')
flags.DEFINE_integer('seed', -1,
                     'Random seed for data shuffling and latent vector generator')
flags.DEFINE_boolean('logging', False,
                     'Whether or not to log and checkpoint the training model')
flags.DEFINE_integer('sampling_rate', 14400,
                     'Sampling rate of loaded music files')
flags.DEFINE_float('g_lr', 1e-4, 'Learning rate of the generator')
flags.DEFINE_float('d_lr', 1e-6, 'Learning rate of the discriminator')
flags.DEFINE_float('dropout', 0.1, 'Dropout rate of the discriminator')
flags.DEFINE_integer('g_attn', 2,
                     'Number of multi-head attention layers in the generator')
flags.DEFINE_integer('d_attn', 4,
                     'Number of multi-head attention layers in the discriminator')
flags.DEFINE_float('noise', 0.05,
                   'Level of noise added to discriminator input data')
flags.DEFINE_integer('heads', 8,
                     'Number of heads in ALL multi-head attention blocks')
flags.DEFINE_integer('d_model', 768, 'Multi-head attention dimensionality')
flags.DEFINE_boolean('save_data', False,
                     'Save all training data to a file that will be loaded into memory')
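# With absl-style flags, any of the defaults above can be overridden on the
# command line, e.g. (hypothetical script name):
#   python train_gan.py --train --batch=64 --g_lr=2e-4 --noise=0.1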
def define():
    """Define common flags."""
    # yapf: disable
    # common_flags.define() may be called multiple times in unit tests.
    global _common_flags_defined
    if _common_flags_defined:
        return
    _common_flags_defined = True

    flags.DEFINE_integer('batch_size', 32, 'Batch size.')
    flags.DEFINE_integer('crop_width', None,
                         'Width of the central crop for images.')
    flags.DEFINE_integer('crop_height', None,
                         'Height of the central crop for images.')
    flags.DEFINE_string('train_log_dir', '/tmp/attention_ocr/train',
                        'Directory where to write event logs.')
    flags.DEFINE_string('dataset_name', 'fsns',
                        'Name of the dataset. Supported: fsns')
    flags.DEFINE_string('split_name', 'train',
                        'Dataset split name to run evaluation for: test,train.')
    flags.DEFINE_string('dataset_dir', None, 'Dataset root folder.')
    flags.DEFINE_string('checkpoint', '',
                        'Path for checkpoint to restore weights from.')
    flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')

    # Model hyper parameters
    flags.DEFINE_float('learning_rate', 0.004, 'learning rate')
    flags.DEFINE_string('optimizer', 'momentum', 'the optimizer to use')
    flags.DEFINE_float('momentum', 0.9,
                       'momentum value for the momentum optimizer if used')
    flags.DEFINE_bool('use_augment_input', True,
                      'If True will use image augmentation')

    # Method hyper parameters
    # conv_tower_fn
    flags.DEFINE_string('final_endpoint', 'Mixed_5d',
                        'Endpoint to cut inception tower')

    # sequence_logit_fn
    flags.DEFINE_bool('use_attention', True,
                      'If True will use the attention mechanism')
    flags.DEFINE_bool('use_autoregression', True,
                      'If True will use autoregression (a feedback link)')
    flags.DEFINE_integer('num_lstm_units', 256,
                         'number of LSTM units for sequence LSTM')
    flags.DEFINE_float('weight_decay', 0.00004,
                       'weight decay for char prediction FC layers')
    flags.DEFINE_float('lstm_state_clip_value', 10.0,
                       'cell state is clipped by this value prior to the cell'
                       ' output activation')

    # 'sequence_loss_fn'
    flags.DEFINE_float('label_smoothing', 0.1, 'weight for label smoothing')
    flags.DEFINE_bool('ignore_nulls', True,
                      'ignore null characters for computing the loss')
    flags.DEFINE_bool('average_across_timesteps', False,
                      'divide the returned cost by the total label weight')
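# The module-level guard makes define() idempotent: unit tests that each call
# common_flags.define() during setup do not trip absl's duplicate-flag check.
# A minimal usage sketch:
import common_flags

common_flags.define()
common_flags.define()  # second call returns early; no DuplicateFlagError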
import os
import io
import pandas as pd
import tensorflow as tf
import tensorflow.compat.v1.flags as flags
import sys
sys.path.append("../../models/research")

from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict

# flags = tf.app.flags
flags.DEFINE_string("csv_input", "", "Path to the CSV input")
flags.DEFINE_string("output_path", "", "Path to output TFRecord")
flags.DEFINE_string(
    "label_map", "",
    "Path to the `label_map.pbtxt` that contains the <class_name>:<class_index> "
    "pairs generated by `xml_to_csv.py` or manually.",
)
# If your images have more labels, input them as
# flags.DEFINE_string('label0', '', 'Name of class[0] label')
# flags.DEFINE_string('label1', '', 'Name of class[1] label')
# and so on.
flags.DEFINE_string("img_path", "", "Path to images")
FLAGS = flags.FLAGS


def split(df, group):
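    # A plausible body for split(), following the widely circulated
    # generate_tfrecord.py tutorial this script resembles (an assumption, not
    # necessarily this repo's exact code): group the CSV rows by filename so
    # each image's boxes end up in one tf.Example.
    data = namedtuple("data", ["filename", "object"])
    gb = df.groupby(group)
    return [data(filename, gb.get_group(x))
            for filename, x in zip(gb.groups.keys(), gb.groups)]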
--export_dir=/tmp/attention_ocr_export
"""
import os

import tensorflow as tf
from tensorflow import app
from tensorflow.contrib import slim
from tensorflow.compat.v1 import flags

import common_flags
import model_export_lib

FLAGS = flags.FLAGS
common_flags.define()

flags.DEFINE_string('export_dir', None, 'Directory to export model files to.')
flags.DEFINE_integer(
    'image_width', None,
    'Image width used during training (or crop width if used).'
    ' If not set, the dataset default is used instead.')
flags.DEFINE_integer(
    'image_height', None,
    'Image height used during training (or crop height if used).'
    ' If not set, the dataset default is used instead.')
flags.DEFINE_string('work_dir', '/tmp', 'A directory to store temporary files.')
flags.DEFINE_integer('version_number', 1, 'Version number of the model')
flags.DEFINE_bool(
    'export_for_serving', True,
    'Whether the exported model accepts serialized tf.Example '
    'protos as input')
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.compat.v1 import flags
from tensorflow.keras.optimizers import RMSprop

from dataset import synth_input_fn
from dataset import input_fn, NUM_IMAGES
from dataset import get_images_infor_from_file, ImagenetSequence

keras = tf.keras

flags.DEFINE_string('model', './train_dir/resnet50_model_195.h5',
                    'TensorFlow \'GraphDef\' file to load.')
flags.DEFINE_bool('eval_tfrecords', True, 'If True then use tf_records data.')
flags.DEFINE_string('data_dir', '/data3/datasets/Kaggle/fruits-360/tf_records',
                    'The directory where the eval TFRecords are put')
flags.DEFINE_bool('eval_images', False, 'If True then use image files.')
flags.DEFINE_string('eval_image_path', '/data3/datasets/Kaggle/fruits-360/val_for_tf2',
                    'The directory where the eval images are put')
flags.DEFINE_string('eval_image_list', '/data3/datasets/Kaggle/fruits-360/val_labels.txt',
                    'File that has the validation images list')
flags.DEFINE_string('save_path', "train_dir",
                    'The directory where the model is saved')
flags.DEFINE_string('filename', "resnet50_model_{epoch}.h5",
                    'The name of the saved model')
flags.DEFINE_integer('label_offset', 1, 'label offset')
flags.DEFINE_string('gpus', '0', 'The gpus used for running evaluation.')
flags.DEFINE_integer('ps_tasks', 0,
                     'The number of parameter servers. If the value is 0, then'
                     ' the parameters are handled locally by the worker.')
flags.DEFINE_integer('save_summaries_secs', 60,
                     'The frequency with which summaries are saved, in '
                     'seconds.')
flags.DEFINE_integer('save_interval_secs', 600,
                     'Frequency in seconds of saving the model.')
flags.DEFINE_integer('max_number_of_steps', int(1e10),
                     'The maximum number of gradient steps.')
flags.DEFINE_string('checkpoint_inception', '',
                    'Checkpoint to recover inception weights from.')
flags.DEFINE_float('clip_gradient_norm', 2.0,
                   'If greater than 0, the gradients will be clipped by it.')
flags.DEFINE_bool('sync_replicas', False,
                  'If True will synchronize replicas during training.')
flags.DEFINE_integer('replicas_to_aggregate', 1,
                     'The number of gradient updates before updating params.')
flags.DEFINE_integer('total_num_replicas', 1,
                     'Total number of worker replicas.')
flags.DEFINE_integer('startup_delay_steps', 15,
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.compat.v1 import flags
from tensorflow.python.training import monitored_session

import common_flags
import datasets
import data_provider

FLAGS = flags.FLAGS
common_flags.define()

# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png
flags.DEFINE_string('image_path_pattern', '',
                    'A file pattern with a placeholder for the image index.')


def get_dataset_image_size(dataset_name):
    # Ideally this info should be exposed through the dataset interface itself.
    # But currently it is not available by other means.
    ds_module = getattr(datasets, dataset_name)
    height, width, _ = ds_module.DEFAULT_CONFIG['image_shape']
    return width, height


def load_images(file_pattern, batch_size, dataset_name):
    width, height = get_dataset_image_size(dataset_name)
    images_actual_data = np.ndarray(shape=(batch_size, height, width, 3),
                                    dtype='uint8')
    for i in range(batch_size):
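        # A plausible loop body, following the attention_ocr demo this snippet
        # resembles (an assumption): substitute the batch index into the file
        # pattern and read one image per slot.
        path = file_pattern % i
        pil_image = PIL.Image.open(tf.io.gfile.GFile(path, 'rb'))
        images_actual_data[i, ...] = np.asarray(pil_image)
    return images_actual_data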
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.compat.v1 import flags
from tensorflow.keras import optimizers
import sys, os

import config as conf

# set up and parse custom flags
flags.DEFINE_integer('model_version', conf.version, "Version number of the model")
flags.DEFINE_boolean('rebuild', False,
                     "Drop the checkpoint weights and rebuild model from scratch")
flags.DEFINE_string('lib_folder', conf.lib_folder, "Local library folder")
FLAGS = flags.FLAGS

# mount the library folder
sys.path.append(os.path.abspath(FLAGS.lib_folder))
from data import MNISTProcessor
import visualizer as v

# load data
data_processor = MNISTProcessor(conf.data_path, conf.train_labels,
                                conf.train_images, '', '')
x_data_train, y_data_train = data_processor.load_train(normalize=True).get_training_data()

# initialize the network
input_layer = Input(shape=(784,), name='input')
network = Dense(152, activation='tanh', name='dense_1')(input_layer)
network = Dense(76, activation='tanh', name='dense_2')(network)
network = Dense(38, activation='tanh', name='dense_3')(network)
network = Dense(4, activation='tanh', name='dense_4')(network)
network = Dense(38, activation='tanh', name='dense_5')(network)
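# A plausible mirror-image decoder finishing the 784-152-76-38-4 bottleneck
# sketched above (an assumption; the layer names past dense_5 and the loss
# choice are hypothetical):
network = Dense(76, activation='tanh', name='dense_6')(network)
network = Dense(152, activation='tanh', name='dense_7')(network)
output_layer = Dense(784, activation='sigmoid', name='output')(network)

autoencoder = Model(input_layer, output_layer)
autoencoder.compile(optimizer=optimizers.Adam(), loss='mse')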
_ION_COLUMNS = ['b', 'b-H2O', 'b-NH3', 'y', 'y-H2O', 'y-NH3']
_MOD_SEQUENCE = 'ModifiedSequence'
_PRECURSOR_CHARGE = 'PrecursorCharge'
_CHARGE = 'Charge'
_FRAGMENTATION = 'Fragmentation'
_MASS_ANALYZER = 'MassAnalyzer'
_LENGTH = 'Length'
_POSITION_COL = 'FragmentNumber'
_ION_COL = 'FragmentType'
_ABUNDANCE_COL = 'RelativeIntensity'
_LOSS_TYPE = 'FragmentLossType'
_FRAG_CHARGE = 'FragmentCharge'

FLAGS = flags.FLAGS
flags.DEFINE_string('metadata_file', None, 'Path to a TSV file with metadata.')
flags.DEFINE_string('input_data_pattern', None, 'Input data filename pattern.')
flags.DEFINE_enum('label_dim', '2', ['2', '6'],
                  'Number of features in the output/label time step.')
flags.DEFINE_string('output_data_dir', None,
                    'Directory with prediction outputs.')
flags.DEFINE_bool('neutral_losses', False,
                  'True if H2O and NH3 losses are modeled.')
flags.DEFINE_bool(
    'batch_prediction', True,
    'True if batch prediction instead of online was used to generate outputs.')
flags.DEFINE_string(
    'add_input_data_pattern', None,
    ('Input data filename pattern for additional features to be included in '
     'the final output. These inputs should be formatted in the same way as '
     'the model outputs - i.e., JSON format with "key" and "output" values,'
import split_train_test
import fit_lstm
import evaluate
import SAVE
import make_prediction
from tensorflow.compat.v1 import flags
import pandas as pd
import os

flags.DEFINE_string("path", "./data/catgwise/생활+건강/", "path to data file")
flags.DEFINE_string("click_data", "clicks_ma_ratio",
                    "clicks_minmax, clicks_first_ratio, clicks_ma_ratio")
flags.DEFINE_integer("s", 60, "seasonality")
flags.DEFINE_float("dropout", 0, "dropout rate (default=0)")
flags.DEFINE_integer("epoch", 40, "epoch")
flags.DEFINE_integer("batch_size", 1, "batch size")
flags.DEFINE_integer("pred_time", 30, "how much time to predict")
flags.DEFINE_string("pred_index", "05-01-2020",
                    "when to begin prediction (month-date-year), default: '01-01-2020'")
flags.DEFINE_boolean("bi", True, "true if bidirectional")
FLAGS = flags.FLAGS

catg_lst = os.listdir(FLAGS.path)
# temppollsell = [[True, True, True], [True, True, False], [True, False, True],
#                 [True, False, False], [False, True, True], [False, True, False],
#                 [False, False, True], [False, False, False]]
temppollsell = [[True, True, False], [True, False, False],
                [False, True, False], [False, False, False]]

for category in catg_lst:
    data_path = "{}{}".format(FLAGS.path, category)
    file = pd.read_csv(data_path, encoding='CP949')
    file['date'] = pd.to_datetime(file['date'])
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow import app
from tensorflow.compat.v1 import flags

import data_provider
import common_flags

FLAGS = flags.FLAGS
common_flags.define()

# yapf: disable
flags.DEFINE_integer('num_batches', 100, 'Number of batches to run eval for.')
flags.DEFINE_string('eval_log_dir', '/tmp/attention_ocr/eval',
                    'Directory where the evaluation results are saved to.')
flags.DEFINE_integer('eval_interval_secs', 60,
                     'Frequency in seconds to run evaluations.')
flags.DEFINE_integer('number_of_steps', None,
                     'Number of times to run evaluation.')
# yapf: enable


def main(_):
    if not tf.io.gfile.exists(FLAGS.eval_log_dir):
        tf.io.gfile.makedirs(FLAGS.eval_log_dir)
    dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
    model = common_flags.create_model(dataset.num_char_classes,