Code Example #1
def train_model(params):

    train_generator = data.data_generator(params, mode='training')
    val_generator = data.data_generator(params, mode='validation')

    base_model = tf.keras.applications.MobileNet(
        input_shape=params['image_shape'],
        include_top=False,
        weights='imagenet')
    base_model.trainable = False

    mobilnet_tiny = model.MobilNet_Architecture_Tiny(
        width_multiplier=params['width_multiplier'],
        depth_multiplier=params['depth_multiplier'],
        num_classes=params['num_classes'],
        dropout_rate=params['dropout_rate'],
        regularization_rate=params['regularization_rate'])

    net = tf.keras.Sequential([base_model, mobilnet_tiny])

    cp_callback = tf.keras.callbacks.ModelCheckpoint(os.path.join(
        params['model_dir'], 'tf_ckpt'),
                                                     save_weights_only=True,
                                                     verbose=1,
                                                     period=5)

    tb_callback = tf.keras.callbacks.TensorBoard(
        os.path.join(params['model_dir'], 'logs'))

    optimizer = tf.keras.optimizers.Adam(lr=params['learning_rate'])

    net.compile(optimizer=optimizer,
                loss=params['loss'],
                metrics=[f1_score, 'accuracy'])

    steps_per_epoch = train_generator.n // params['batch_size']
    validation_steps = val_generator.n // params['batch_size']

    history = net.fit_generator(train_generator,
                                steps_per_epoch=steps_per_epoch,
                                epochs=params['num_epochs'],
                                workers=4,
                                validation_data=val_generator,
                                validation_steps=validation_steps,
                                callbacks=[cp_callback, tb_callback])

    # Save the training history as a JSON file
    with open(os.path.join(params['model_dir'], 'history.json'), 'w') as file:
        json.dump(history.history, file)
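
Note: fit_generator and the lr argument to Adam used above are deprecated in TensorFlow 2.x; Model.fit accepts Python generators (and tf.data.Dataset objects) directly. A minimal, self-contained sketch of the modern equivalent; the toy model and generator below are illustrative stand-ins, not part of the original project:

import numpy as np
import tensorflow as tf

def toy_generator(batch_size=8):
    # Illustrative stand-in for data.data_generator: yields (images, labels) forever.
    while True:
        x = np.random.rand(batch_size, 224, 224, 3).astype("float32")
        y = np.random.randint(0, 2, size=(batch_size, 1)).astype("float32")
        yield x, y

net = tf.keras.Sequential([
    tf.keras.layers.GlobalAveragePooling2D(input_shape=(224, 224, 3)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
net.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
            loss="binary_crossentropy", metrics=["accuracy"])

# In TF 2.x, Model.fit replaces fit_generator and accepts the generator directly.
history = net.fit(toy_generator(), steps_per_epoch=10, epochs=2)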
Code Example #2
File: main.py  Project: Cloud-LoneCrane/170_unet
def predict(model_name):
    generator_train = data_generator(train_tfrecord_dir, "train")
    model = keras.models.load_model(model_name,
                                    custom_objects={
                                        "weighted_dice_coefficient_loss":
                                        weighted_dice_coefficient_loss
                                    })
    model.summary()
    images, masks = generator_train.__next__()

    pred = model.predict(images[:5])

    if not os.path.exists("predict"):
        os.mkdir("predict")

    for i in range(5):
        plt.imshow(pred[i, :, :, 0], cmap="gray")
        plt.savefig("predict/pred" + str(i) + '.png')
        plt.clf()

        plt.imshow(images[i, :, :, 0], cmap="gray")
        plt.savefig("./predict/image" + str(i) + ".png")
        plt.clf()

        plt.imshow(masks[i, :, :, 0], cmap="gray")
        plt.savefig("./predict/mask" + str(i) + ".png")
        plt.clf()

    return None
Code Example #3
def test():
    test_data_lists = get_files(config.test_data,"test")
    test_datagen = data_generator(test_data_lists,"test",augument=False).create_train()
    model = get_model()
    model.load_weights(config.weights_path)
    predicted_labels = np.argmax(model.predict_generator(test_datagen,steps=len(test_data_lists) / 16),axis=-1)  
    print(predicted_labels) 
Code Example #4
def train_model(train_filelist, config, classes, rare_classes):
    batch_size = config['train_params']['batch_size']
    different_classes_per_batch = config['train_params']['different_classes_per_batch']
    nb_epoch = config['train_params']['num_epochs']
    steps_per_epoch = config['train_params']['steps_per_epoch']
    img_size = config['img']['img_size']

    model = prepare_model(config)
    callbacks = get_callbacks(config)
    model.fit_generator(data_generator(train_filelist, classes, rare_classes, img_size, batch_size, different_classes_per_batch),
                        steps_per_epoch=steps_per_epoch, epochs=nb_epoch, callbacks=callbacks)
    return model
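
The data_generator functions passed to fit_generator in these examples share the same basic contract: an iterator that yields (inputs, targets) batches indefinitely. A minimal sketch of that contract; the shapes and placeholder arrays are illustrative and not taken from any of the projects shown:

import numpy as np

def data_generator_sketch(file_list, batch_size, img_size, num_classes):
    # Yields (batch_of_images, batch_of_one_hot_labels) forever, as Keras expects.
    while True:
        np.random.shuffle(file_list)
        for start in range(0, len(file_list) - batch_size + 1, batch_size):
            batch_files = file_list[start:start + batch_size]
            # A real implementation would load and preprocess each file in batch_files here.
            x = np.zeros((batch_size, img_size, img_size, 3), dtype="float32")
            y = np.zeros((batch_size, num_classes), dtype="float32")
            yield x, y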
Code Example #5
File: main.py  Project: Cloud-LoneCrane/170_unet
def train():
    generator_train = data_generator(train_tfrecord_dir, "train")
    generator_test = data_generator(test_tfrecord_dir, "test")
    images, masks = generator_train.__next__()
    # save_img(images, masks)

    input_shape = (512, 512, 1)
    model = uent2d_model(input_shape=input_shape,
                         loss_function=keras.losses.binary_crossentropy)
    # Print the model structure
    model.summary()

    # # Save a diagram of the model
    # from keras.utils import plot_model
    # plot_model(model, to_file="model.png")

    tensorboard = keras.callbacks.TensorBoard(
        log_dir='./logs',  # log directory
        histogram_freq=0,  # how often (in epochs) to compute histograms; 0 disables them
        batch_size=5,  # how much data to use when computing the histograms
        write_graph=True,  # whether to store the network graph
        write_grads=True,  # whether to visualize gradient histograms
        write_images=True,  # whether to visualize parameters as images
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None)
    if not os.path.exists("./ckpt"):
        os.mkdir("./ckpt")
    filepath = "./ckpt/{epoch:03d}-{val_loss:.4f}.h5"
    callbacks = get_callbacks(filepath)
    callbacks.append(tensorboard)
    model.fit_generator(generator_train,
                        validation_data=generator_test,
                        steps_per_epoch=steps_per_epoch,
                        callbacks=callbacks,
                        epochs=epochs,
                        verbose=1,
                        validation_steps=1)
    model.save("./ckpt/save_model.h5")
    return None
Code Example #6
def train(callbacks):
    #1. compile
    print("--> Compiling the model...")
    model = get_model()
    # load raw train data
    raw_train_data_lists = get_files(config.train_data,"train")
    #split raw train data to train and val
    train_data_lists,val_data_lists = train_test_split(raw_train_data_lists,test_size=0.3)
    # for train
    train_datagen = data_generator(train_data_lists,"train",augument=True).create_train()
    #embed()
    # val data
    val_datagen = data_generator(val_data_lists,"val",augument=True).create_train()  # if the model predicts well on augmented data, it should be more robust
    history = model.fit_generator(
        train_datagen,
        validation_data = val_datagen,
        epochs = config.epochs,
        verbose = 1,
        callbacks = callbacks,
        steps_per_epoch=len(train_data_lists) // config.batch_size,
        validation_steps=len(val_data_lists) // config.batch_size
    )
Code Example #7
    def train(self,
              train_dataset,
              val_dataset,
              learning_rate,
              epochs,
              augment=None,
              custom_callbacks=None):
        """Train the model.
        train_dataset, val_dataset: Training and validation Dataset objects.
        learning_rate: The learning rate to train with
        epochs: Number of training epochs.
        augment: Optional.
        """
        assert self.mode == "training", "Create model in training mode."

        # Data generators
        train_generator = data_generator(train_dataset,
                                         self.config,
                                         shuffle=True,
                                         augment=augment)
        val_generator = data_generator(val_dataset, self.config)

        # Create log_dir if it does not exist
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        # Callbacks
        callbacks = [
            keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                        histogram_freq=0,
                                        write_graph=True,
                                        write_images=False),
            keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                            verbose=0,
                                            save_weights_only=True),
        ]

        # Add custom callbacks to the list
        if custom_callbacks:
            callbacks += custom_callbacks

        # Train
        log("\nStarting at epoch {}. LR={}\n".format(self.epoch,
                                                     learning_rate))
        log("Checkpoint Path: {}".format(self.checkpoint_path))
        self.compile(learning_rate, self.config.MOMENTUM)

        # Work-around for Windows: Keras fails on Windows when using
        # multiprocessing workers. See discussion here:
        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
            workers = 0
        else:
            workers = multiprocessing.cpu_count()

        self.keras_model.fit_generator(
            train_generator,
            initial_epoch=self.epoch,
            epochs=epochs,
            steps_per_epoch=self.config.STEPS_PER_EPOCH,
            callbacks=callbacks,
            validation_data=val_generator,
            validation_steps=self.config.VAL_STEPS,
            max_queue_size=100,
            workers=workers,
            use_multiprocessing=True,
        )
        self.epoch = max(self.epoch, epochs)
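
A hedged usage sketch for the method above: the model instance, the two Dataset objects, and the hyperparameter values are assumptions; only the EarlyStopping callback is standard Keras.

import keras

# Assumes `model` was created in training mode and train_ds / val_ds were prepared
# with the project's own Dataset class.
early_stop = keras.callbacks.EarlyStopping(monitor="val_loss", patience=3)
model.train(train_ds, val_ds,
            learning_rate=1e-3,
            epochs=20,
            custom_callbacks=[early_stop])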
Code Example #8
File: char_ptb.py  Project: CookieBox26/trellisnet
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
setproctitle(args.name)
torch.set_default_tensor_type('torch.FloatTensor')
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)

###############################################################################
# Load data
###############################################################################

file, file_len, valfile, valfile_len, testfile, testfile_len, corpus = data_generator(args)

ntokens = len(corpus.dictionary)
eval_batch_size = 10
test_batch_size = 10
train_data = batchify(char_tensor(corpus, file), args.batch_size, args)
val_data = batchify(char_tensor(corpus, valfile), eval_batch_size, args)
test_data = batchify(char_tensor(corpus, testfile), eval_batch_size, args)
print(train_data.size(), val_data.size())


class Logger(object):
    def __init__(self):
        self.terminal = sys.stdout
        self.log = open("logs/" + args.name + ".log", "a")
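
The Logger class is cut off in this excerpt. A typical tee-style completion (an assumption for illustration, not the project's actual code) duplicates everything written to stdout into the log file:

import sys

class TeeLogger(object):
    def __init__(self, name):
        self.terminal = sys.stdout
        self.log = open("logs/" + name + ".log", "a")

    def write(self, message):
        # Send the message to both the console and the log file.
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        self.terminal.flush()
        self.log.flush()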
Code Example #9
from model import Train
from model import TorchModel
from src import Evaluation
from data import data_generator

if __name__ == '__main__':
    # Generate data samples for training and test
    x_train, x_test, y_train, y_test = data_generator(num_samples=100,
                                                      visualize_plot=False)
    # Initialize training class
    torch_model = Train(x_train, y_train)
    # Train torch model
    torch_model.train_torch_model()
    # Save model
    torch_model.save_model()

    # Once the model is trained and saved as torch_model.onnx,
    # it will be loaded and evaluated with onnxruntime, caff2 and tensorflow
    evaluation = Evaluation(x_test, y_test, 'onnx/torch_model.onnx')

    # Trigger the evaluators
    evaluation.onnxruntime_evaluation()
    evaluation.caffe2_evaluation()
    evaluation.tensorflow_evaluation()
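
The Train and Evaluation classes are not shown in this excerpt. A minimal, self-contained sketch of the underlying torch.onnx export and onnxruntime evaluation; the tiny regressor and the tensor shapes are illustrative assumptions:

import numpy as np
import torch
import torch.nn as nn
import onnxruntime as ort

# Toy model standing in for the trained torch model.
model = nn.Sequential(nn.Linear(1, 16), nn.ReLU(), nn.Linear(16, 1))
model.eval()

# Export the model to ONNX.
dummy_input = torch.randn(1, 1)
torch.onnx.export(model, dummy_input, "torch_model.onnx",
                  input_names=["input"], output_names=["output"],
                  dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}})

# Evaluate the exported model with onnxruntime.
session = ort.InferenceSession("torch_model.onnx")
x_test = np.random.rand(5, 1).astype(np.float32)
pred = session.run(None, {"input": x_test})[0]
print(pred.shape)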
Code Example #10
from keras.utils import generic_utils
from keras.metrics import categorical_crossentropy

from config import Config
from model import build_model_training
from roipooling import rpn_to_roi
from class_gt import calc_gt_class
from data import get_data, data_generator
from losses import rpn_loss_cls, get_loss_regr

assert os.path.isdir('model')

# get config and data
conf = Config()
train_imgs, classes_count = get_data(conf.train_path)
data_gen_train = data_generator(train_imgs, conf)

# build the model
model_rpn, model_classifier, model_all = build_model_training(conf)

# check if model is already existing
if not os.path.isfile(conf.weights_path):
    # start a new history
    record_df = pd.DataFrame(columns=[
        'mean_overlapping_bboxes', 'class_acc', 'loss_rpn_cls',
        'loss_rpn_regr', 'loss_class_cls', 'loss_class_regr', 'curr_loss',
        'elapsed_time'
    ])
else:
    # load weights for whole model (maybe rpn, class separately?)
    model_all.load_weights(conf.weights_path, by_name=True)
Code Example #11
from keras.layers import *
from keras import backend as K
from keras.models import Model, load_model
from keras.optimizers import Adam

from train import yolo3_loss
from data import data_generator

C = 80
# images_path = "D:/DeepLearning/data/VOCdevkit/VOC2012/JPEGImages/"
# annotations_path = "D:/DeepLearning/data/VOCdevkit/VOC2012/Annotations/"
# batch_size = 16
# pick = []

y_true_input = list()
y_true_input.append(Input(name='input0', shape=[52, 52, 3, (5 + C)]))
y_true_input.append(Input(name='input1', shape=[26, 26, 3, (5 + C)]))
y_true_input.append(Input(name='input2', shape=[13, 13, 3, (5 + C)]))

model_load = load_model('model.h5', compile=False)
loss_layer = Lambda(yolo3_loss,
                    name='loss_layer')([*model_load.output, *y_true_input])
model = Model(inputs=[model_load.input, *y_true_input], outputs=loss_layer)

model.compile(optimizer=Adam(lr=1e-3), loss=lambda y_true, y_pred: y_pred)
model.fit_generator(generator=data_generator(),
                    steps_per_epoch=17125 / 16,
                    epochs=30)

exit()
Code Example #12
File: training.py  Project: Nimi42/AlphaZero
from nn import chess_conv
from data import data_generator


def train(model, gen):
    model.fit_generator(gen, epochs=50, steps_per_epoch=120 * 121 // 64)


if __name__ == '__main__':
    path = "/home/nemo/Downloads/stockfish_jonny_2014.pgn"

    gen = data_generator('../../resources/pgn_data', 64)
    model = chess_conv()

    train(model, gen)

    model.save('/home/nemo/Downloads/omega_one.h5')
Code Example #13
File: Bert.py  Project: yefan001/TextClassify
    x = bert_model([x1_in, x2_in])
    x = Lambda(lambda x: x[:, 0])(x)  # Take the vector at the [CLS] position for classification
    x = Dropout(BertConfig.droupout)(x)
    p = Dense(num_classes, activation='softmax')(x)

    model = Model([x1_in, x2_in], p)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(BertConfig.learn_rate),
                  metrics=['accuracy'])
    model.summary()
    return model


model = bert_model(BertConfig)

train_D = data_generator(train_data, BertConfig.seq_length,
                         BertConfig.batch_size, tokenizer)
valid_D = data_generator(valid_data, BertConfig.seq_length,
                         BertConfig.batch_size, tokenizer)

model.fit_generator(train_D.__iter__(),
                    steps_per_epoch=len(train_D),
                    epochs=BertConfig.epochs,
                    validation_data=valid_D.__iter__(),
                    validation_steps=len(valid_D))

# Model prediction
test_D = data_generator(test_data, BertConfig.seq_length,
                        BertConfig.batch_size, tokenizer)
test_model_pred = model.predict_generator(test_D.__iter__(),
                                          steps=len(test_D),
                                          verbose=1)
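
predict_generator returns one row of class probabilities per test sample; a short follow-up sketch (continuing the variable names from the snippet above) converts them into predicted label indices:

import numpy as np

# argmax over the class axis gives the predicted class id for each test sample.
test_pred_labels = np.argmax(test_model_pred, axis=1)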
Code Example #14
File: train.py  Project: VadymBoikov/image-improve

# JPEG
# degrade_funs = [partial(degrad.compress_jpeg, quality=i) for i in range(10,100,20)]
# train_paths = np.array([os.path.join(train_images_dir, i) for i in os.listdir(train_images_dir)])


# SUPER RES
# degrade_funs = [degrad.bicubic_restoration]
# train_paths = np.array([os.path.join(train_images_dir, i) for i in os.listdir(train_images_dir)])


train_images = data.mp_handler(Image.open, train_paths, n_workers)
train_batches_epoch = len(train_paths) // batch_size

train_generator = data.data_generator(train_images, degrade_funs, patch_size=patch_size, batch_size=batch_size)


model = nets.network(input_shape, K=12)
model.compile(optimizer=Adam(initial_lr), loss="mean_squared_error")

checkpointer = ModelCheckpoint(filepath='%s/check_{epoch:02d}.ckpt' % CHECKPOINT, verbose=1, period=2)
lrate = LearningRateScheduler(lambda epoch: initial_lr * 0.9 ** epoch)
tensorboard = TensorBoard(log_dir=LOGDIR, histogram_freq=0,
                          write_graph=True, write_grads=True,
                          batch_size=batch_size)

model.fit_generator(train_generator, steps_per_epoch=train_batches_epoch, epochs=n_epochs,
                    max_queue_size=128, callbacks=[checkpointer, lrate, tensorboard],
                    use_multiprocessing=False, workers=n_workers)
Code Example #15
from data import read_data, data_generator
from model import *
from varible import *

x_train, y_train, x_valid, y_valid = read_data(Gb_data_dir)

input_pb = tf.placeholder(tf.float32, [None, 224, 224, 3])
label_pb = tf.placeholder(tf.int32, [None])
logist, net = vgg16_adjusted(input_pb, is_train=False)

saver = tf.train.Saver()
with tf.Session() as sess:
    try:
        saver.restore(sess, '/media/hsq/新加卷/ubuntu/ckpt/dogVScat/vgg16_adjusted/0/' + "ep450-step281500-loss0.001")
        print("load ok!")
    except:
        print("ckpt file does not exist")
        raise

    data_yield = data_generator(x_valid, y_valid, is_train=False)
    error_num = 0
    i = 0
    for img, lable in data_yield:
        logist_out = sess.run(logist, feed_dict={input_pb: img})
        logist_out = np.argmax(logist_out, axis=-1)
        a = np.equal(logist_out, list(map(int, lable)))
        a = list(a)
        error_num += a.count(False)
        i += 1
    print('error: ', str(error_num), ' in ', str(i * Gb_batch_size))
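
This example and the similar tf.placeholder/Session snippets below use the TensorFlow 1.x API. If they have to run under a TensorFlow 2.x installation, the usual shim (a suggestion, not something these projects ship) is the v1 compatibility module:

# Replace "import tensorflow as tf" with the v1 compatibility module.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, Sessions, tf.train.Saver, etc.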
Code Example #16
def main():
    # n_class = len(Gb_label)
    # model_name = Gb_model_name
    log_dir = Gb_ckpt_dir
    final_dir = Gb_ckpt_dir
    save_frequency = Gb_save_frequency
    batch_size = Gb_batch_size
    learning_rate = Gb_learning_rate

    annotations_path = Gb_ann_path
    pick = Gb_labels
    chunks = read_xml(annotations_path, pick)

    n_epoch = Gb_epoch
    n_step_epoch = int(len(chunks) / batch_size)
    # n_step = n_epoch * n_step_epoch

    input_pb = tf.placeholder(tf.float32, [None, 416, 416, 3])
    y_true_pb = tf.placeholder(tf.float32, [None, 52, 52])
    net_out = infenence(input_pb)
    loss_op = yolo_loss(net_out, y_true_pb)
    train_op = training(loss_op, learning_rate)

    # varis = tf.global_variables()
    # var_to_restore = [val for val in varis if 'Adam' not in val.name and 'optimizer' not in val.name]
    # saver = tf.train.Saver(var_to_restore)
    saver = tf.train.Saver()
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # if tf.train.get_checkpoint_state('./ckpt2/'):  # check whether the checkpoint exists
        #     saver.restore(sess, './ckpt2/' + "ep094-step17000-loss61286.484")
        #     print("load ok!")
        # else:
        #     print("ckpt file does not exist")

        # tensor = tf.global_variables('layer_0_conv')
        # b = sess.run(tensor)

        train_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        min_loss = 10000000
        for epoch in range(n_epoch):
            step_epoch = 0
            # TODO shuffle chunks
            data_yield = data_generator(chunks)

            # train_loss, n_batch = 0, 0
            for origin_img_sizeds, segment_datas in data_yield:
                step += 1
                step_epoch += 1
                start_time = time.time()

                summary_str, loss, _ = sess.run(
                    [summary_op, loss_op, train_op],
                    feed_dict={
                        input_pb: origin_img_sizeds,
                        y_true_pb: segment_datas
                    })
                train_writer.add_summary(summary_str, step)

                # Print this step's loss at every step
                print("Loss %fs  : Epoch %d  %d/%d: Step %d  took %fs" %
                      (loss, epoch, step_epoch, n_step_epoch, step,
                       time.time() - start_time))

                if step % 1000 == 0 and loss < min_loss:
                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    min_loss = loss

                if step % save_frequency == 0:
                    if step != save_frequency:
                        os.remove(final_dir + temp + '.data-00000-of-00001')
                        os.remove(final_dir + temp + '.index')
                        os.remove(final_dir + temp + '.meta')

                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    temp = 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                        epoch, step, loss)
Code Example #17
     'bn_11': [],
     'bn_12': [],
     'bn_21': [],
     'bn_22': [],
     'bn_31': [],
     'bn_32': [],
     'bn_33': [],
     'bn_41': [],
     'bn_42': [],
     'bn_43': [],
     'bn_51': [],
     'bn_52': [],
     'bn_53': []
 }
 for key in apoz:
     data_yield = data_generator(x_valid, y_valid)
     j = 1
     for img, lable in data_yield:
         out = sess.run(get_target_output(key, net),
                        feed_dict={input_pb: img})
         for i in range(out.shape[-1]):
             nonzero_num = np.count_nonzero(out[..., i])
             zero_num = (out[..., i].size -
                         nonzero_num) / Gb_batch_size
             try:
                 apoz[key][i] = (apoz[key][i] *
                                 (j - 1) + zero_num) / j
             except:
                 apoz[key].append(zero_num)
         j += 1
     for i in range(len(apoz[key])):
Code Example #18
def main():
    x_train, y_train, x_valid, y_valid = read_data(Gb_data_dir)

    batch_size = Gb_batch_size
    learning_rate = Gb_learning_rate
    log_dir = Gb_ckpt_dir
    final_dir = Gb_ckpt_dir
    n_epoch = Gb_epoch
    n_step_epoch = int(len(y_train) / batch_size)
    save_frequency = Gb_save_frequency

    input_pb = tf.placeholder(tf.float32, [None, 224, 224, 3])
    label_pb = tf.placeholder(tf.int32, [None])
    logist, net = resnet_101(input_pb)
    loss_op = losses(logits=logist, labels=label_pb)
    train_op = optimizer_sgd(loss_op, learning_rate=learning_rate)

    saver = tf.train.Saver(max_to_keep=1000)
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        try:
            saver.restore(sess,
                          os.path.join('/media/xinje/New Volume/hsq/ckpt/birds/res0/', 'res0ep099-step59000-loss2.526'))
            print("load ok!")
        except:
            print("ckpt file does not exist")
            raise

        train_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        min_loss = 10000000
        for epoch in range(n_epoch):
            step_epoch = 0
            # TODO shuffle chunks
            data_yield = data_generator(x_train, y_train)

            for img, lable in data_yield:
                step += 1
                step_epoch += 1
                start_time = time.time()
                loss, _, summary_str = sess.run([loss_op, train_op, summary_op],
                                                feed_dict={input_pb: img, label_pb: lable})
                train_writer.add_summary(summary_str, step)
                # Print this step's loss at every step
                print("Loss %fs  : Epoch %d  %d/%d: Step %d  took %fs" % (
                    loss, epoch, step_epoch, n_step_epoch, step, time.time() - start_time))

                if step % save_frequency == 0:
                    print("Save model " + "!" * 10)
                    saver.save(sess, os.path.join(final_dir,
                                                  'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(epoch, step, loss)))
                    if loss < min_loss:
                        min_loss = loss
                    else:
                        try:
                            os.remove(os.path.join(final_dir, temp + '.data-00000-of-00001'))
                            os.remove(os.path.join(final_dir, temp + '.index'))
                            os.remove(os.path.join(final_dir, temp + '.meta'))
                        except:
                            pass
                        temp = 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(epoch, step, loss)
Code Example #19
def main():
    n_class = len(Gb_label)
    # model_name = Gb_model_name
    log_dir = Gb_ckpt_dir
    final_dir = Gb_ckpt_dir
    save_frequency = Gb_save_frequency
    label_dir = Gb_label_dir
    batch_size = Gb_batch_size
    pick = Gb_label
    learning_rate = Gb_learning_rate
    chunks = read_xml(
        Gb_img_dir,
        label_dir,
        pick,
    )
    n_epoch = Gb_epoch
    n_step_epoch = int(len(chunks) / batch_size)
    # n_step = n_epoch * n_step_epoch

    input_pb = tf.placeholder(tf.float32, [None, 416, 416, 3])
    y_true_pb = tf.placeholder(tf.float32,
                               [None, Gb_cell, Gb_cell, 9, 5 + n_class])
    net_out = infenence(input_pb)
    # net_out = squeezenet(input_pb)
    loss_op = model_loss(net_out, y_true_pb)
    train_op = training(loss_op, learning_rate)

    # varis = tf.global_variables()
    # var_to_restore = [val for val in varis if 'Adam' not in val.name and 'optimizer' not in val.name]
    # saver = tf.train.Saver(var_to_restore)
    saver = tf.train.Saver(max_to_keep=100)
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # if tf.train.get_checkpoint_state('./ckpt3/'):  # check whether the checkpoint exists
        #     saver.restore(sess, './ckpt3/' + "test.ckpt")
        #     print("load ok!")
        # else:
        #     print("ckpt file does not exist")

        train_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        min_loss = 10000000
        for epoch in range(n_epoch):
            step_epoch = 0
            # TODO shuffle chunks
            data_yield = data_generator(chunks)

            # train_loss, n_batch = 0, 0
            for img, lable_box in data_yield:
                step += 1
                step_epoch += 1
                start_time = time.time()

                # a = sess.run(tf.trainable_variables()[0])
                summary_str, loss, _ = sess.run(
                    [summary_op, loss_op, train_op],
                    feed_dict={
                        input_pb: img,
                        y_true_pb: lable_box
                    })
                train_writer.add_summary(summary_str, step)

                # Print this step's loss at every step
                print("Loss %fs  : Epoch %d  %d/%d: Step %d  took %fs" %
                      (loss, epoch, step_epoch, n_step_epoch, step,
                       time.time() - start_time))

                if step % save_frequency == 0 and loss < min_loss:
                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    min_loss = loss
Code Example #20
def main():
    n_class = len(Gb_label)
    log_dir = Gb_ckpt_dir
    final_dir = Gb_ckpt_dir
    save_frequency = Gb_save_frequency
    batch_size = Gb_batch_size
    pick = Gb_label
    learning_rate = Gb_learning_rate
    chunks = read_xml('train.txt', pick)
    n_epoch = Gb_epoch
    n_step_epoch = int(len(chunks) / batch_size)

    input_pb = tf.placeholder(tf.float32, [None, 416, 416, 3])
    y_true_pb_1 = tf.placeholder(tf.float32, [None, 52, 52, 3, 5 + n_class])
    y_true_pb_2 = tf.placeholder(tf.float32, [None, 26, 26, 3, 5 + n_class])
    y_true_pb_3 = tf.placeholder(tf.float32, [None, 13, 13, 3, 5 + n_class])
    net_out = inference(input_pb, n_class)
    loss_op = yolo3_loss(net_out, [y_true_pb_1, y_true_pb_2, y_true_pb_3])
    train_op = training(loss_op, learning_rate)

    # varis = tf.global_variables()
    # var_to_restore = [val for val in varis if 'Adam' not in val.name and 'optimizer' not in val.name]
    # saver = tf.train.Saver(var_to_restore)
    saver = tf.train.Saver()
    summary_op = tf.summary.merge_all()
    temp = ''
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # if tf.train.get_checkpoint_state('./ckpt3/'):  # check whether the checkpoint exists
        #     saver.restore(sess, './ckpt3/' + "test.ckpt")
        #     print("load ok!")
        # else:
        #     print("ckpt file does not exist")

        # tensor = tf.global_variables('layer_0_conv')
        # b = sess.run(tensor)

        train_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        min_loss = 10000000
        for epoch in range(n_epoch):
            step_epoch = 0
            # TODO shuffle chunks
            data_yield = data_generator(chunks)

            for img, lable_box in data_yield:
                step += 1
                step_epoch += 1
                start_time = time.time()

                loss, _, summary_str = sess.run(
                    [loss_op, train_op, summary_op],
                    feed_dict={
                        input_pb: img,
                        y_true_pb_1: lable_box[0],
                        y_true_pb_2: lable_box[1],
                        y_true_pb_3: lable_box[2]
                    })
                train_writer.add_summary(summary_str, step)

                # Print this step's loss at every step
                print("Loss %fs  : Epoch %d  %d/%d: Step %d  took %fs" %
                      (loss, epoch, step_epoch, n_step_epoch, step,
                       time.time() - start_time))

                if step % save_frequency == 0:
                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    if loss < min_loss:
                        min_loss = loss
                    else:
                        try:
                            os.remove(final_dir + temp +
                                      '.data-00000-of-00001')
                            os.remove(final_dir + temp + '.index')
                            os.remove(final_dir + temp + '.meta')
                        except:
                            pass
                        temp = 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss)
Code Example #21
import torch
import torch.nn as nn
import torch.optim as optim

import models

from data import data_generator

epochs = 400
train_size = 1500
batch_size = 128
steps_per_epoch = round(train_size / batch_size)

net = models.MiniSqueezeNet(1, 10)

criterion = nn.BCELoss()
optimizer = optim.Adam(net.parameters())

data_gen = data_generator(batch_size=batch_size)

for epoch in range(epochs):

    for step in range(steps_per_epoch):
        inputs, labels = next(data_gen)
        inputs = torch.Tensor(inputs)
        labels = torch.Tensor(labels)
        optimizer.zero_grad()

        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        loss = loss.item()
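
A hand-rolled generator works here, but the idiomatic PyTorch alternative is torch.utils.data. A hedged sketch of the equivalent batching with TensorDataset and DataLoader; the random tensors stand in for the real inputs and labels:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Illustrative stand-ins for the real training data (1 channel, 10 target values).
inputs = torch.rand(1500, 1, 28, 28)
labels = torch.rand(1500, 10)

loader = DataLoader(TensorDataset(inputs, labels), batch_size=128, shuffle=True)

for epoch in range(2):
    for batch_inputs, batch_labels in loader:
        pass  # forward/backward pass goes here, exactly as in the loop above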
Code Example #22
File: prune.py  Project: dingjiangang/model_pruning
                d = a[f]
            elif str(int(layer_name) + 1) in var_part_restore[i]:
                c = [i for i in range(a.shape[-2]) if i not in b]
                d = a[..., c, :]
            else:
                d = a[..., c]
            sess.run(tf.assign(get_target_variable(var_part_restore[i]), d))

    saver = tf.train.Saver(max_to_keep=1000)
    train_writer = tf.summary.FileWriter(log_dir, sess.graph)
    step = 0
    min_loss = 10000000
    for epoch in range(n_epoch):
        step_epoch = 0
        # TODO shuffle chunks
        data_yield = data_generator(x_train, y_train, is_train=True)

        for img, lable in data_yield:
            step += 1
            step_epoch += 1
            start_time = time.time()
            loss, _, summary_str = sess.run([loss_op, train_op, summary_op],
                                            feed_dict={
                                                input_pb: img,
                                                label_pb: lable
                                            })
            train_writer.add_summary(summary_str, step)
            # Print this step's loss at every step
            print("Loss %fs  : Epoch %d  %d/%d: Step %d  took %fs" %
                  (loss, epoch, step_epoch, n_step_epoch, step,
                   time.time() - start_time))
Code Example #23
File: train.py  Project: dingjiangang/model_pruning
def main():
    x_train, y_train, x_valid, y_valid, x_test, y_test = read_data(
        '/home/hsq/DeepLearning/data/dogVscat/train',
        0.3,
        0,
        pos_path="/dog/",
        neg_path="/cat/")

    batch_size = Gb_batch_size
    learning_rate = Gb_learning_rate
    log_dir = Gb_ckpt_dir
    final_dir = Gb_ckpt_dir
    n_epoch = Gb_epoch
    n_step_epoch = int(len(y_train) / batch_size)
    save_frequency = Gb_save_frequency

    input_pb = tf.placeholder(tf.float32, [None, 224, 224, 3])
    label_pb = tf.placeholder(tf.int32, [None])
    logist, net = model(input_pb)
    loss_op = losses(logits=logist, labels=label_pb)
    train_op = trainning(loss_op, learning_rate=learning_rate)

    saver = tf.train.Saver(max_to_keep=100)
    summary_op = tf.summary.merge_all()
    temp = ''
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # try:
        #     saver.restore(sess, '/media/hsq/新加卷/ubuntu/ckpt/vgg16/3/' + "ep168-step92000-loss0.179")
        #     print("load ok!")
        # except:
        #     print("ckpt file does not exist")
        #     raise

        train_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        min_loss = 10000000
        for epoch in range(n_epoch):
            step_epoch = 0
            # TODO shuffle chunks
            data_yield = data_generator(x_train, y_train)

            for img, lable in data_yield:
                step += 1
                step_epoch += 1
                start_time = time.time()
                loss, _, summary_str = sess.run(
                    [loss_op, train_op, summary_op],
                    feed_dict={
                        input_pb: img,
                        label_pb: lable
                    })
                train_writer.add_summary(summary_str, step)
                # Print this step's loss at every step
                print("Loss %fs  : Epoch %d  %d/%d: Step %d  took %fs" %
                      (loss, epoch, step_epoch, n_step_epoch, step,
                       time.time() - start_time))

                if step % save_frequency == 0:
                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    if loss < min_loss:
                        min_loss = loss
                    else:
                        try:
                            os.remove(final_dir + temp +
                                      '.data-00000-of-00001')
                            os.remove(final_dir + temp + '.index')
                            os.remove(final_dir + temp + '.meta')
                        except:
                            pass
                        temp = 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss)
Code Example #24
               input_shape=input_shape))
    model.add(BatchNormalization(axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())

    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(clipnorm=1.),
                  metrics=['accuracy'])

    model.save("model")

    model.fit_generator(generator=data_generator(batch_size=batch_size),
                        steps_per_epoch=steps_per_epoch,
                        epochs=epochs)
    model.save("model")
Code Example #25
File: Train.py  Project: MiRA-lab-dev/SynRec
    nadam = Adam(lr=0.001)
    model.load_weights('model.10-0.193.hdf5')
    model_checkpoint = ModelCheckpoint('model.{epoch:02d}-{val_loss:.3f}.hdf5',
                                       monitor='val_loss',
                                       verbose=1)
    # model_lrschedule = LearningRateScheduler(lr_schedule)
    # model.compile(optimizer=nadam, loss='binary_crossentropy', metrics=['accuracy'])
    model.compile(optimizer=sgd,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    ####### data generator ########
    train_dataset = 'train'
    val_dataset = 'val'
    train_generator = data_generator(train_dataset,
                                     shuffle=True,
                                     augment=True,
                                     batch_size=1)
    val_generator = data_generator(val_dataset,
                                   shuffle=True,
                                   batch_size=1,
                                   augment=False)

    model.fit_generator(generator=train_generator,
                        steps_per_epoch=1000,
                        epochs=20,
                        verbose=1,
                        callbacks=[model_checkpoint],
                        validation_data=next(val_generator),
                        validation_steps=100)

    # model.fit_generator(train_generator, steps_per_epoch=2000, epochs=30,
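
One thing to watch in the call above: validation_data=next(val_generator) draws a single batch up front, so every epoch validates on that same fixed batch. If the intent is to validate on fresh batches each epoch, the generator itself is usually passed; a suggested variant (not the project's code):

model.fit_generator(generator=train_generator,
                    steps_per_epoch=1000,
                    epochs=20,
                    verbose=1,
                    callbacks=[model_checkpoint],
                    validation_data=val_generator,
                    validation_steps=100)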
Code Example #26
File: train.py  Project: alecGraves/unet-tf
# configuration session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
''' Users define data loader (with train and test) '''
img_shape = [512, 512]
batch_size = 6
epochs = 10
steps_per_epoch = 2 * 2500 // batch_size  # 28 npz's averaging 75 images a piece
train_dir = 'D:\\data\\road_detector\\train3'
val_dir = 'D:\\data\\road_detector\\val'
load_from_checkpoint = ''
checkpoint_path = os.path.join('..', 'training', 'weights')
tensorboard_path = os.path.join('..', 'training', 'logs')
train_generator = data_generator(train_dir,
                                 batch_size=batch_size,
                                 shape=img_shape,
                                 flip_prob=.4)
test_generator = data_generator(val_dir,
                                batch_size=batch_size,
                                shape=img_shape,
                                flip_prob=0)

num_test_samples = 100

label = tf.placeholder(tf.float32, shape=[None] + img_shape + [1])

with tf.name_scope('unet'):
    model = UNet().create_model(img_shape=img_shape + [3], num_class=1)
    img = model.input
    pred = model.output
    # img, pred = create_unet(in_shape=img_shape+[7], out_channels=1, depth=5)
Code Example #27
plt.show()

x_train, y_train, x_valid, y_valid, x_test, y_test = read_data(
    '/home/hsq/DeepLearning/data/LongWoodCutPickJpg/')

input_pb = tf.placeholder(tf.float32, [None, 224, 224, 3])
logist, net = model(input_pb)

saver = tf.train.Saver()
with tf.Session() as sess:
    if tf.train.get_checkpoint_state('./ckpt/'):  # check whether the checkpoint exists
        saver.restore(sess, './ckpt/' + "ep050-step3000-loss0.000")
        print("load ok!")
    else:
        print("ckpt file does not exist")
    data_yield = data_generator(x_train, y_train)
    i = 0
    fc2_temp = None
    final_temp = None
    for img, lable in data_yield:
        b = net.all_layers
        # c = net.all_params
        # fc2_w = sess.run(net.all_params[-2])
        fc2_out, final_out = sess.run([net.all_layers[-2], net.all_layers[-1]],
                                      feed_dict={input_pb: img})
        np.save('./matrix/fc2/' + str(i), fc2_out)
        np.save('./matrix/final/' + str(i), final_out)
        if i == 0:
            fc2_temp = fc2_out
            final_temp = final_out
        else: