Example #1
File: detect.py  Project: rurusasu/Python
    def __init__(self):
        try:
            flags.DEFINE_string('classes', './data/coco.names',
                                'path to classes file')
            flags.DEFINE_string('weights', './checkpoints/yolov3.tf',
                                'path to weights file')
            flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
            flags.DEFINE_integer('size', 416, 'resize images to')
            flags.DEFINE_string('image', './data/girl.png',
                                'path to input image')
            flags.DEFINE_string('tfrecord', None, 'tfrecord instead of image')
            flags.DEFINE_string('output', './output.jpg',
                                'path to output image')
            flags.DEFINE_integer('num_classes', 80,
                                 'number of classes in the model')
        except flags.DuplicateFlagError:
            # The flags were already registered (e.g. on a second instantiation);
            # re-defining them is not allowed, so skip.
            print("Flags already defined; skipping re-definition")
        self.yolo = YoloV3(classes=flags.FLAGS.num_classes)
        self.yolo.load_weights(flags.FLAGS.weights).expect_partial()
        #logging.info('weights loaded')

        self.class_names = [
            c.strip() for c in open(flags.FLAGS.classes).readlines()
        ]
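For context, a minimal driver sketch (hypothetical: the wrapping class name, its module, and the YoloV3 import are assumptions not shown in this excerpt) illustrating that absl-style flags are parsed before __init__ reads flags.FLAGS:

import sys
from tensorflow.compat.v1 import flags
# from yolov3_tf2.models import YoloV3   # assumed source of the YoloV3 class used above

if __name__ == '__main__':
    flags.FLAGS(sys.argv)        # parse the command line before any FLAGS.* access
    detector = Detector()        # hypothetical class wrapping the __init__ above
    print('loaded classes:', len(detector.class_names))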
Example #2
# Imports assumed by this excerpt; MaxAbsScaler comes from scikit-learn's preprocessing module.
import os
import pickle

from sklearn import preprocessing
from tensorflow.compat.v1 import flags

normalizer = preprocessing.MaxAbsScaler()
audio_dir = os.path.join(os.getcwd(), 'music-data')
SONG_FN = 'dubstep.p'
#filenames = glob.glob(audio_dir + '/fma_small/' + '/*[0-9]/*')

with open(SONG_FN, 'rb') as f:
    filenames = pickle.load(f)


flags.DEFINE_integer('batch', 32, 'Batch size')
flags.DEFINE_integer('epochs', 10, 'Number of iterations to train on the entire dataset')
flags.DEFINE_integer('latent', 100, 'Dimensionality of the latent space')
flags.DEFINE_string('model_path', '.', 'Path to model checkpoint')
flags.DEFINE_string('output_dir', '.', 'Path to model checkpoints and logs')
flags.DEFINE_string('dtype', 'float32', 'Floating point data type of tensorflow graph')
flags.DEFINE_boolean('train', False, 'Train the music GAN')
flags.DEFINE_integer('seed', -1, 'Random seed for data shuffling and latent vector generator')
flags.DEFINE_boolean('logging', False, 'Whether or not to log and checkpoint the training model')
flags.DEFINE_integer('sampling_rate', 14400, 'Sampling rate of loaded music files')
flags.DEFINE_float('g_lr', 1e-4, 'Learning rate of the generator')
flags.DEFINE_float('d_lr', 1e-6, 'Learning rate of the discriminator')
flags.DEFINE_float('dropout', 0.1, 'Dropout rate of the discriminator')
flags.DEFINE_integer('g_attn', 2, 'Number of multi-head attention layers in the generator')
flags.DEFINE_integer('d_attn', 4, 'Number of multi-head attention layers in the discriminator')
flags.DEFINE_float('noise', 0.05, 'Level of noise added to discriminator input data')
flags.DEFINE_integer('heads', 8, 'Number of heads in ALL multi-head attention blocks')
flags.DEFINE_integer('d_model', 768, 'Multi-head attention dimensionality')
flags.DEFINE_boolean('save_data', False, 'Save all training data to a file that will be loaded into memory')


FLAGS = flags.FLAGS

if FLAGS.seed != -1:
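The excerpt is cut off at the seed check; a hedged sketch of the kind of body such a branch typically has (an assumption, not the project's code):

# Hypothetical body for the truncated branch above: fix the RNG seeds for reproducibility.
import numpy as np
import tensorflow as tf

if FLAGS.seed != -1:
    np.random.seed(FLAGS.seed)       # controls data shuffling
    tf.random.set_seed(FLAGS.seed)   # controls the latent-vector generator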
Example #3
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.compat.v1 import flags
from tensorflow.keras import optimizers
import sys, os
import config as conf

#set up and parse custom flags
flags.DEFINE_integer('model_version', conf.version, "Version number of the model")
flags.DEFINE_boolean('rebuild', False, "Drop the checkpoint weights and rebuild model from scratch")
flags.DEFINE_string('lib_folder', conf.lib_folder, "Local library folder")
FLAGS = flags.FLAGS

#mount the library folder
sys.path.append(os.path.abspath(FLAGS.lib_folder))
from data import MNISTProcessor
import visualizer as v

#load data
data_processor = MNISTProcessor(conf.data_path, conf.train_labels, 
                                conf.train_images, '', '')                               
x_data_train, y_data_train = data_processor.load_train(normalize=True).get_training_data()

#initialize the network
input_layer = Input(shape=(784,), name='input')
network = Dense(152, activation='tanh', name='dense_1')(input_layer)
network = Dense(76, activation='tanh', name='dense_2')(network)
network = Dense(38, activation='tanh', name='dense_3')(network)
network = Dense(4, activation='tanh', name='dense_4')(network)
network = Dense(38, activation='tanh', name='dense_5')(network)
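The excerpt stops mid-network; a hedged sketch (decoder sizes, filenames, and training arguments below are assumptions, not the project's code) of how such an autoencoder is typically closed out and trained with the imported ModelCheckpoint callback:

# Hypothetical continuation: mirror the encoder back up to 784 outputs and train input->input.
network = Dense(76, activation='tanh', name='dense_6')(network)
network = Dense(152, activation='tanh', name='dense_7')(network)
output_layer = Dense(784, activation='sigmoid', name='output')(network)

autoencoder = Model(inputs=input_layer, outputs=output_layer)
autoencoder.compile(optimizer=optimizers.Adam(), loss='mse')

# checkpoint filename derived from the model_version flag (assumed naming)
checkpoint = ModelCheckpoint('autoencoder_v{}.h5'.format(FLAGS.model_version))
autoencoder.fit(x_data_train, x_data_train,
                epochs=10, batch_size=128,
                validation_split=0.1, callbacks=[checkpoint])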
Example #4
                   'If greater than 0 then the gradients would be clipped by '
                   'it.')

flags.DEFINE_bool('sync_replicas', False,
                  'If True will synchronize replicas during training.')

flags.DEFINE_integer('replicas_to_aggregate', 1,
                     'The number of gradients updates before updating params.')

flags.DEFINE_integer('total_num_replicas', 1,
                     'Total number of worker replicas.')

flags.DEFINE_integer('startup_delay_steps', 15,
                     'Number of training steps between replicas startup.')

flags.DEFINE_boolean('reset_train_dir', False,
                     'If true will delete all files in the train_log_dir')

flags.DEFINE_boolean('show_graph_stats', False,
                     'Output model size stats to stderr.')
# yapf: enable

TrainingHParams = collections.namedtuple('TrainingHParams', [
    'learning_rate',
    'optimizer',
    'momentum',
    'use_augment_input',
])


def get_training_hparams():
  return TrainingHParams(
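The return statement is truncated; a sketch of how the namedtuple is typically filled in from matching flags (the flag names and the FLAGS binding below are assumptions, since they are not defined in this excerpt):

FLAGS = flags.FLAGS  # assumed to be bound near the flag definitions

# Hypothetical completion of the truncated function above.
def get_training_hparams():
  return TrainingHParams(
      learning_rate=FLAGS.learning_rate,
      optimizer=FLAGS.optimizer,
      momentum=FLAGS.momentum,
      use_augment_input=FLAGS.use_augment_input)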
Example #5
import evaluate
import SAVE
import make_prediction
from tensorflow.compat.v1 import flags
import pandas as pd
import os

flags.DEFINE_string("path", "./data/catgwise/생활+건강/", "path to data file")
flags.DEFINE_string("click_data", "clicks_ma_ratio", "clicks_minmax, clicks_first_ratio, clicks_ma_ratio")
flags.DEFINE_integer("s", 60, "seasonality")
flags.DEFINE_float("dropout", 0, "dropout rate(default=0)")
flags.DEFINE_integer("epoch", 40, "epoch")
flags.DEFINE_integer("batch_size", 1, "batch size")
flags.DEFINE_integer("pred_time", 30, "how much time to predict")
flags.DEFINE_string("pred_index", "05-01-2020", "when beginning prediction(month-date-year), default:'01-01-2020'")
flags.DEFINE_boolean("bi", True,"true if bidirectional")

FLAGS = flags.FLAGS


catg_lst = os.listdir(FLAGS.path)
# temppollsell = [[True, True, True], [True, True, False], [True, False, True], [True, False, False],
#                 [False, True, True], [False, True, False], [False, False, True], [False, False, False]]
temppollsell = [[True, True, False], [True, False, False],
                [False, True, False], [False, False, False]]
for category in catg_lst:
    data_path = "{}{}".format(FLAGS.path, category)
    file = pd.read_csv(data_path, encoding='CP949')
    file['date'] = pd.to_datetime(file['date'])
    data = file.set_index('date')
    best_predict = pd.DataFrame(data = [100], columns=['error_rate'])
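For orientation only, a minimal sketch of how the flags above carve one category's series into a training window and a FLAGS.pred_time-step horizon (purely illustrative; the project's make_prediction, evaluate, and SAVE modules are not reproduced here):

    # Illustrative only, conceptually inside the per-category loop above:
    # split the series at the prediction start date.
    split_at = pd.Timestamp(FLAGS.pred_index)                 # '05-01-2020' -> 2020-05-01
    train_window = data.loc[data.index < split_at]
    horizon = data.loc[data.index >= split_at].head(FLAGS.pred_time)
    print(category, len(train_window), "training rows,", len(horizon), "rows to predict")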