Example #1
max_iter = 200000  # 600000

# True = start data generator client, False = use augmented dataset file (deprecated)
use_client_gen = True

WEIGHTS_BEST = "weights.best.h5"
TRAINING_LOG = "training.csv"
LOGS_DIR = "./logs"


def get_last_epoch():
    data = pandas.read_csv(TRAINING_LOG)
    return max(data['epoch'].values)


model = get_training_model(weight_decay, vgg_norm=True)

from_vgg = dict()
from_vgg['conv1_1'] = 'block1_conv1'
from_vgg['conv1_2'] = 'block1_conv2'
from_vgg['conv2_1'] = 'block2_conv1'
from_vgg['conv2_2'] = 'block2_conv2'
from_vgg['conv3_1'] = 'block3_conv1'
from_vgg['conv3_2'] = 'block3_conv2'
from_vgg['conv3_3'] = 'block3_conv3'
from_vgg['conv3_4'] = 'block3_conv4'
from_vgg['conv4_1'] = 'block4_conv1'
from_vgg['conv4_2'] = 'block4_conv2'

# load previous weights or vgg19 if this is the first run
if os.path.exists(WEIGHTS_BEST):
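    # NOTE: the snippet is truncated here; a plausible continuation, sketched
    # from the fuller version of this pattern in Example #17 below (the exact
    # resume logic is an assumption):
    model.load_weights(WEIGHTS_BEST)
    last_epoch = get_last_epoch() + 1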
Example #2
def letter_probs_to_code(letter_probs):
    return "".join(common.CHARS[i] for i in numpy.argmax(letter_probs, axis=1))


if __name__ == "__main__":
    weights_cnn = 'weights_20180708_171720_feedback.npz'
    weights_yolo = 'model_data/yolo_19000.h5'
    start_time = time.time()
    prYellow('Loading Yolov3...')
    yolo = yolo.YOLO(weights_yolo)  # note: rebinds the `yolo` module name to the model instance
    prGreen('Yolov3: model, anchors and classes loaded.')
    prYellow('Loading the Convolutional Neural Network (CNN)...')
    f = numpy.load(weights_cnn)
    param_vals = [f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))]
    x, y, params = model.get_training_model()
    prGreen('CNN: model and weights loaded.')

    time.sleep(2)
    prYellow('Initializing TensorFlow session...')
    with tf.Session(config=tf.ConfigProto()) as sess:
        feed_dict = {x: numpy.zeros([1, 64, 128])}
        feed_dict.update(dict(zip(params, param_vals)))
        y_val = sess.run(y, feed_dict=feed_dict)
        out_boxes, out_probs, out_labels = yolo.get_detections(
            Image.fromarray(numpy.zeros([100, 100, 3]).astype('uint8')))
        prGreen('TensorFlow session initialized.')
        prYellow('Initializing camera...')
        camera = PiCamera()
        camera.resolution = (1024, 768)
        camera.framerate = 16  # originally 32
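        # NOTE: the capture loop is truncated here. A minimal sketch of how
        # frames would typically be pulled from the PiCamera and pushed
        # through the detector; the PiRGBArray usage and the loop body are
        # assumptions, not part of the original:
        rawCapture = PiRGBArray(camera, size=camera.resolution)  # from picamera.array import PiRGBArray
        time.sleep(0.1)  # give the sensor time to warm up
        for frame in camera.capture_continuous(rawCapture, format='rgb',
                                               use_video_port=True):
            out_boxes, out_probs, out_labels = yolo.get_detections(
                Image.fromarray(frame.array))
            rawCapture.truncate(0)  # reset the buffer for the next frame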
Example #3
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    """
    Train the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    :param learn_rate:
        Learning rate to use.

    :param report_steps:
        Every `report_steps` batches a progress report is printed.

    :param batch_size:
        The size of the batches used for training.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    :return:
        The learned network weights.

    """
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])

    digits_loss = get_loss(y, y_)
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(
        digits_loss)  # original loss: digits only, no presence term

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    # tf.initialize_all_variables() is deprecated in favour of:
    init = tf.global_variables_initializer()

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run([best, correct, digits_loss],
                     feed_dict={
                         x: test_xs,
                         y_: test_ys
                     })
        #num_correct = numpy.sum(numpy.all(r[0] == r[1], axis=1))
        num_correct = numpy.sum(r[0] == r[1])

        r_short = (r[0][:30], r[1][:30])
        #for b, c in zip(*r_short):
        #    print("{} <-> {}".format(vec_to_plate(c),vec_to_plate(b)))
        #num_p_correct = numpy.sum(r[2] == r[3])
        #train_writer.add_summary(mrg, batch_idx)
        print("B{:3d} {:2.02f}% loss: {} ".format(
            batch_idx, 100. * num_correct / (7 * len(r[0])), r[2]))
        #if (100. * num_correct / len(r[0]) > 48.1):
        #  last_weights = [p.eval() for p in params]
        #  timestr = time.strftime("%Y%m%d_%H%M%S")
        #  numpy.savez("weights_{}_result.npz".format(timestr), *last_weights)

    def do_batch():
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.80)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        #tf.summary.scalar('loss', loss)
        #tf.summary.scalar('digits', digits_loss)
        #tf.summary.scalar('presence', presence_loss)
        #merged = tf.summary.merge_all()

        #train_writer = tf.summary.FileWriter('./logs',sess.graph)

        test_xs, test_ys = unzip(
            list(read_data("dataset_210618_test/*.png"))[:114])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print("time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                            (last_batch_idx - batch_idx)))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            timestr = time.strftime("%Y%m%d_%H%M%S")
            numpy.savez("weights_{}.npz".format(timestr), *last_weights)
            return last_weights
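# A typical entry point for this train() would look like the following sketch;
# the argv handling (and `import sys`) is an assumption, but the weight-loading
# idiom matches Example #2:
if __name__ == "__main__":
    if len(sys.argv) > 1:
        f = numpy.load(sys.argv[1])
        initial_weights = [f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))]
    else:
        initial_weights = None
    train(learn_rate=0.001, report_steps=20, batch_size=50,
          initial_weights=initial_weights)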
Example #4
stepsize = 136106  # 68053; the learning rate is updated after each stepsize iterations: lr = lr * gamma
max_iter = 200000 # 600000

# True = start data generator client, False = use augmented dataset file (deprecated)
use_client_gen = True

WEIGHTS_BEST = "weights.best.h5"
TRAINING_LOG = "training.csv"
LOGS_DIR = "./logs"
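
# For reference, the schedule these constants feed (see step_decay in
# Example #17): the learning rate is multiplied by gamma once every
# `stepsize` iterations. base_lr and gamma are defined elsewhere and are
# assumptions here; a sketch:
def lr_at(iteration, base_lr, gamma):
    return base_lr * gamma ** (iteration // stepsize)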

def get_last_epoch():
    data = pandas.read_csv(TRAINING_LOG)
    return max(data['epoch'].values)


model = get_training_model(weight_decay)

from_vgg = dict()
from_vgg['conv1_1'] = 'block1_conv1'
from_vgg['conv1_2'] = 'block1_conv2'
from_vgg['conv2_1'] = 'block2_conv1'
from_vgg['conv2_2'] = 'block2_conv2'
from_vgg['conv3_1'] = 'block3_conv1'
from_vgg['conv3_2'] = 'block3_conv2'
from_vgg['conv3_3'] = 'block3_conv3'
from_vgg['conv3_4'] = 'block3_conv4'
from_vgg['conv4_1'] = 'block4_conv1'
from_vgg['conv4_2'] = 'block4_conv2'

# load previous weights or vgg19 if this is the first run
if os.path.exists(WEIGHTS_BEST):
Example #5
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    """
    Train the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    :param learn_rate:
        Learning rate to use.

    :param report_steps:
        Every `report_steps` batches a progress report is printed.

    :param batch_size:
        The size of the batches used for training.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    :return:
        The learned network weights.

    """
    global params
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])

    digits_loss, presence_loss, loss = get_loss(y, y_)
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run([best,
                      correct,
                      tf.greater(y[:, 0], 0),
                      y_[:, 0],
                      digits_loss,
                      presence_loss,
                      loss],
                     feed_dict={x: test_xs, y_: test_ys})
        num_correct = numpy.sum(
                        numpy.logical_or(
                            numpy.all(r[0] == r[1], axis=1),
                            numpy.logical_and(r[2] < 0.5,
                                              r[3] < 0.5)))
        recall = numpy.sum(numpy.logical_and(numpy.all(r[0] == r[1], axis=1), r[3] > 0.5))
        end = 300
        r_short = (r[0][:end], r[1][:end], r[2][:end], r[3][:end])
        #set_trace()
        for i, v in enumerate(zip(*r_short)):
            print("{} {} {} <-> {} {}".format(i, vec_to_plate(v[1]), v[3],
                                              vec_to_plate(v[0]), float(v[2])))
        num_p_correct = numpy.sum(r[2] == r[3])

        error_str = ""
        for i, v in enumerate(zip(*r_short)):
            if v[2] != v[3]:
                error_str += "{}:(presence) ".format(i)
            elif not numpy.array_equal(v[0], v[1]) and v[3] > 0.5:
                error_letters = v[0] != v[1]                
                for b,c in zip(v[0][error_letters], v[1][error_letters]):
                    error_str += "{}:{}->{} ".format(i, common.CHARS[c], common.CHARS[b])
        print ("B{:3d} num:{:2.02f}% recall:{:2.02f}% presence:{:02.02f}% loss: {} "
               "(digits: {}, presence: {}) |{}|").format(
            batch_idx,
            100. * num_correct / (len(r[0])),
            100. * recall / numpy.sum(r[3] > 0.5),
            100. * num_p_correct / len(r[2]),
            r[6]/len(r[0]),
            r[4]/len(r[0]),
            r[5]/len(r[0]),
            error_str)
#            "".join("X "[numpy.array_equal(v[0], v[1]) or (not v[2] and not v[3])]
#                                           for i, v in enumerate(zip(*r_short))))
#            " ".join(str(i) for i, v in enumerate(zip(*r_short)) 
#                    if not(numpy.array_equal(v[0], v[1]) or (not v[2] and not v[3]))))

    def do_batch():
        sess.run(train_step,
                 feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(list(read_data("test/*.png"))[:300])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print "time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                                            (last_batch_idx - batch_idx))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
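# get_loss is not shown in these snippets. A sketch consistent with the inline
# computation in Examples #14 and #18 below (keyword arguments per the TF 1.x
# API; the 10x presence weighting is taken from those examples):
def get_loss(y, y_):
    # per-character softmax cross-entropy over the 7 plate positions
    digits_loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(
        logits=tf.reshape(y[:, 1:], [-1, len(common.CHARS)]),
        labels=tf.reshape(y_[:, 1:], [-1, len(common.CHARS)])))
    # presence-indicator loss, up-weighted by 10
    presence_loss = 10. * tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=y[:, :1], labels=y_[:, :1]))
    return digits_loss, presence_loss, digits_loss + presence_loss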
Example #6
def get_last_epoch_and_weights_file():
    os.makedirs(WEIGHT_DIR, exist_ok=True)
    files = [file for file in glob(WEIGHT_DIR + '/weights.*.h5')]
    files = [file.split('/')[-1] for file in files]
    epochs = [file.split('.')[1] for file in files if file]
    epochs = [int(epoch) for epoch in epochs if epoch.isdigit()]
    if len(epochs) == 0:
        if 'weights.best.h5' in files:
            return -1, WEIGHT_DIR + '/weights.best.h5'
    else:
        ep = max([int(epoch) for epoch in epochs])
        return ep, WEIGHT_DIR + '/' + WEIGHTS_SAVE.format(epoch=ep)
    return None, None


model = get_training_model(weight_decay, gpus=use_multiple_gpus)

from_vgg = dict()
from_vgg['conv1_1'] = 'block1_conv1'
from_vgg['conv1_2'] = 'block1_conv2'
from_vgg['conv2_1'] = 'block2_conv1'
from_vgg['conv2_2'] = 'block2_conv2'
from_vgg['conv3_1'] = 'block3_conv1'
from_vgg['conv3_2'] = 'block3_conv2'
from_vgg['conv3_3'] = 'block3_conv3'
from_vgg['conv3_4'] = 'block3_conv4'
from_vgg['conv4_1'] = 'block4_conv1'
from_vgg['conv4_2'] = 'block4_conv2'

# load previous weights or vgg19 if this is the first run
last_epoch, wfile = get_last_epoch_and_weights_file()
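# NOTE: the snippet is truncated here; the continuation presumably mirrors
# prepare() in Example #17 below:
if wfile is not None:
    print("Loading %s ..." % wfile)
    model.load_weights(wfile)
else:
    print("Loading vgg19 weights...")
    vgg_model = VGG19(include_top=False, weights='imagenet')
    for layer in model.layers:
        if layer.name in from_vgg:
            vgg_layer_name = from_vgg[layer.name]
            layer.set_weights(vgg_model.get_layer(vgg_layer_name).get_weights())
            print("Loaded VGG19 layer: " + vgg_layer_name)
    last_epoch = 0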
Example #7
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    """
    Train the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    :param learn_rate:
        Learning rate to use.

    :param report_steps:
        Every `report_steps` batches a progress report is printed.

    :param batch_size:
        The size of the batches used for training.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    :return:
        The learned network weights.

    """
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, 1 + len(model.CLASSES)])

    class_loss, presence_loss, loss = get_loss(y, y_)
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    best = tf.argmax(y[:, 1:], 1)
    correct = tf.argmax(y_[:, 1:], 1)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()

    def do_report():
        r = sess.run([
            best, correct,
            tf.greater(y[:, 0], 0), y_[:, 0], class_loss, presence_loss, loss
        ],
                     feed_dict={
                         x: test_xs,
                         y_: test_ys
                     })

        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print "{} {} <-> {} {}".format(model.CLASSES[c], pc,
                                           model.CLASSES[b], float(pb))

        print("B{:3d} loss: {} ").format(batch_idx, r[6])

    def do_batch():
        #writer = tf.summary.FileWriter("./log", sess.graph)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(list(read_data("test/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print "time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                            (last_batch_idx - batch_idx))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
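# This variant's get_loss (not shown) must return (class_loss, presence_loss,
# loss). A hedged sketch along the lines of the plate version, with a single
# softmax over the classes instead of one per character position:
def get_loss(y, y_):
    class_loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(
        logits=y[:, 1:], labels=y_[:, 1:]))
    presence_loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=y[:, :1], labels=y_[:, :1]))
    return class_loss, presence_loss, class_loss + presence_loss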
Example #8
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    """
    Train the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    :param learn_rate:
        Learning rate to use.

    :param report_steps:
        Every `report_steps` batches a progress report is printed.

    :param batch_size:
        The size of the batches used for training.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    :return:
        The learned network weights.

    """
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, 8 * len(common.CHARS) + 1])

    digits_loss, presence_loss, loss = get_loss(y, y_)
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 8, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 8, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run([
            best, correct,
            tf.greater(y[:, 0], 0), y_[:, 0], digits_loss, presence_loss, loss
        ],
                     feed_dict={
                         x: test_xs,
                         y_: test_ys
                     })
        num_correct = numpy.sum(
            numpy.logical_or(numpy.all(r[0] == r[1], axis=1),
                             numpy.logical_and(r[2] < 0.5, r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print("{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb)))
        num_p_correct = numpy.sum(r[2] == r[3])

        print("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
              "(digits: {}, presence: {}) |{}|".format(
                  batch_idx, 100. * num_correct / (len(r[0])),
                  100. * num_p_correct / len(r[2]), r[6], r[4], r[5],
                  "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                          for b, c, pb, pc in zip(*r_short))))

    def do_batch():
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    # Revised by Juyi on 2017/10/24; previously:
    #   gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    #   with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    with tf.Session(config=config) as sess:

        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)


        # modified by joying; original was 50
        test_xs, test_ys = unzip(list(read_data("test8/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print "time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                            (last_batch_idx - batch_idx))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights8.npz", *last_weights)
            return last_weights
Example #9
def detect(initial_weights=None):
    """
    Test the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    """
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])

    digits_loss, presence_loss, loss = get_loss(y, y_)

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    assert len(params) == len(initial_weights)
    assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    #init = tf.initialize_all_variables() # deprecated
    init = tf.global_variables_initializer()

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run([
            best, correct,
            tf.greater(y[:, 0], 0), y_[:, 0], digits_loss, presence_loss, loss
        ],
                     feed_dict={
                         x: test_xs,
                         y_: test_ys
                     })
        num_correct = numpy.sum(
            numpy.logical_or(numpy.all(r[0] == r[1], axis=1),
                             numpy.logical_and(r[2] < 0.5, r[3] < 0.5)))
        num = numpy.equal(r[0], r[1])
        num_correct_chars = numpy.sum(num)
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print("{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb)))

        print(
            "Num plates: {} corrChars: {:2.02f}% corrLPs: {:2.02f}% loss: {} (digits: {}, presence: {})"
            .format(len(r[0]), 100. * num_correct_chars / (len(r[0]) * 7),
                    100. * num_correct / (len(r[0])), r[6], r[4], r[5]))

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        sess.run(assign_ops)
        test_xs, test_ys = unzip(list(read_data("pickering_test/*.png"))[:])

        do_report()
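# A hedged example of how detect() would be driven; the weights filename is
# illustrative, and the sorted-load idiom matches Example #2:
if __name__ == "__main__":
    f = numpy.load("weights.npz")
    initial_weights = [f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))]
    detect(initial_weights)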
Example #10
stepsize = 136106  # 68053; the learning rate is updated after each stepsize iterations: lr = lr * gamma
max_iter = 200000 # 600000

# True = start data generator client, False = use augmented dataset file (deprecated)
use_client_gen = True

WEIGHTS_BEST = "weights.best.h5"
TRAINING_LOG = "training.csv"
LOGS_DIR = "./logs"

def get_last_epoch():
    data = pandas.read_csv(TRAINING_LOG)
    return max(data['epoch'].values)


model = get_training_model(weight_decay)

from_vgg = dict()
from_vgg['conv1_1'] = 'block1_conv1'
from_vgg['conv1_2'] = 'block1_conv2'
from_vgg['conv2_1'] = 'block2_conv1'
from_vgg['conv2_2'] = 'block2_conv2'
from_vgg['conv3_1'] = 'block3_conv1'
from_vgg['conv3_2'] = 'block3_conv2'
from_vgg['conv3_3'] = 'block3_conv3'
from_vgg['conv3_4'] = 'block3_conv4'
from_vgg['conv4_1'] = 'block4_conv1'
from_vgg['conv4_2'] = 'block4_conv2'

# load previous weights or vgg19 if this is the first run
if os.path.exists(WEIGHTS_BEST):
Example #11
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    # plt_epoch = []
    # plt_loss = []
    """
    Train the network.
    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.
    :param learn_rate:
        Learning rate to use.
    :param report_steps:
        Every `report_steps` batches a progress report is printed.
    :param batch_size:
        The size of the batches used for training.
    :param initial_weights:
        (Optional.) Weights to initialize the network with.
    :return:
        The learned network weights.
    """
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])

    digits_loss, presence_loss, loss = get_loss(y, y_)
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run([
            best, correct,
            tf.greater(y[:, 0], 0), y_[:, 0], digits_loss, presence_loss, loss
        ],
                     feed_dict={
                         x: test_xs,
                         y_: test_ys
                     })
        num_correct = numpy.sum(
            numpy.logical_or(numpy.all(r[0] == r[1], axis=1),
                             numpy.logical_and(r[2] < 0.5, r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        num_p_correct = numpy.sum(r[2] == r[3])
        print("Batch:", batch_idx, "Digit loss:", r[4], "Presence_loss", r[5],
              "Loss:", r[6])
        # plt_epoch.append(batch_idx)
        # plt_loss.append(r[6])

    def do_batch():
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(
            list(read_data("E:/project data/test/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            # plt.plot(plt_epoch, plt_loss)
            # plt.xlabel('epoch')
            # plt.ylabel('loss')
            # plt.title('epoch-loss graph')
            # fig = plt.gcf()
            # plt.show()
            # fig.savefig('loss_graph.pdf')
            last_weights = [p.eval() for p in params]
            numpy.savez("weights0417ver_mini_train.npz", *last_weights)
            return last_weights
Example #12
def train(report_steps, batch_size, initial_weights=None):
    """
    Train the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    (The learning rate is not a parameter here; it follows the exponential
    decay schedule built from `common.INITIAL_LEARNING_RATE`,
    `common.DECAY_STEPS` and `common.LEARNING_RATE_DECAY_FACTOR`.)

    :param report_steps:
        Every `report_steps` batches a progress report is printed.

    :param batch_size:
        The size of the batches used for training.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    :return:
        The learned network weights.

    """
    global_step = tf.Variable(0, trainable=False)
    # batch_idx = tf.placeholder(tf.int32)  # unused: the training loop below rebinds batch_idx
    learning_rate = tf.train.exponential_decay(
        common.INITIAL_LEARNING_RATE,
        global_step,
        common.DECAY_STEPS,
        common.LEARNING_RATE_DECAY_FACTOR,
        staircase=True)
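    # with staircase=True the schedule evaluates to:
    #   lr(step) = INITIAL_LEARNING_RATE *
    #              LEARNING_RATE_DECAY_FACTOR ** floor(step / DECAY_STEPS)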
    # y is the predicted value
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, common.LENGTH * len(common.CHARS)])

    digits_loss = tf.nn.softmax_cross_entropy_with_logits(
        logits=tf.reshape(y, [-1, len(common.CHARS)]),
        labels=tf.reshape(y_, [-1, len(common.CHARS)]))
    cross_entropy = tf.reduce_sum(digits_loss)

    train_step = tf.train.AdamOptimizer(learning_rate).minimize(
        cross_entropy, global_step=global_step)

    predict = tf.argmax(tf.reshape(
        y, [-1, common.LENGTH, len(common.CHARS)]), 2)

    real_value = tf.argmax(
        tf.reshape(y_,
                   [-1, common.LENGTH, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run(
            [predict, real_value, cross_entropy, learning_rate, global_step],
            feed_dict={
                x: test_xs,
                y_: test_ys
            })
        num_correct = numpy.sum(numpy.all(r[0] == r[1], axis=1))
        r_short = (r[0][:common.TEST_SIZE], r[1][:common.TEST_SIZE])
        print "{} <--> {} ".format("real_value", "predict_value")
        for pred, real in zip(*r_short):
            print "{} <--> {} ".format(vec_to_plate(real), vec_to_plate(pred))

        print("batch:{:3d}, hit_rate:{:2.02f}%, cross_entropy:{}, "
              "learning_rate:{}, global_step:{}".format(
                  batch_idx, 100. * num_correct / (len(r[0])), r[2], r[3], r[4]))

        last_weights = [p.eval() for p in params]
        numpy.savez("weights.npz", *last_weights)

    def do_batch():
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(
            list(read_data("test/*.png"))[:common.TEST_SIZE])
        # print "test_xs.shape:{}".format(test_xs.shape)

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                # print "batch_ys.shape():{}".format(batch_ys.shape)
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print "time for 60 batches {}s".format(
                            60 * (last_batch_time - batch_time) /
                            (last_batch_idx - batch_idx))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:

            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
Example #13
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    """
    Train the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    :param learn_rate:
        Learning rate to use.

    :param report_steps:
        Every `report_steps` batches a progress report is printed.

    :param batch_size:
        The size of the batches used for training.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    :return:
        The learned network weights.

    """
    # get the input, output and all parameters of the training network
    # (3 convolutional layers + 2 fully connected layers)
    x, y, params = model.get_training_model()

    # ground-truth output for the given input: shape [n, 1 + 7*36]
    y_ = tf.placeholder(tf.float32, [None, 1 + 7 * len(common.CHARS)])

    digits_loss, presence_loss, loss = get_loss(y, y_)  # build the loss terms
    # minimize the loss with the Adam optimizer
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    # index of the most probable character at each of the 7 plate positions
    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()  # initialize all variables

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        # feed the test set and run the evaluation ops
        r = sess.run(
            [
                best,
                correct,
                tf.greater(y[:, 0], 0),
                y_[:, 0],
                digits_loss,
                presence_loss,
                loss
            ],
            feed_dict={
                x: test_xs,
                y_: test_ys
            })
        # number of samples whose plate is read correctly,
        # or correctly judged as absent
        num_correct = numpy.sum(
            numpy.logical_or(numpy.all(r[0] == r[1], axis=1),
                             numpy.logical_and(r[2] < 0.5, r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            # prints: ground-truth 7-char plate and presence flag
            #         <-> predicted plate and predicted presence probability
            print("{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb)))
        num_p_correct = numpy.sum(r[2] == r[3])  # correct presence/absence decisions

        # prints: batch index, plate accuracy, presence accuracy, total loss
        # (digits: digit loss, presence: presence loss),
        # then one character per test sample: 'X' if wrong, ' ' if right
        print("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
              "(digits: {}, presence: {}) |{}|".format(
                  batch_idx,
                  100. * num_correct / (len(r[0])),
                  100. * num_p_correct / len(r[2]),
                  r[6],
                  r[4],
                  r[5],
                  "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                          for b, c, pb, pc in zip(*r_short))))

    def do_batch():
        # feed one training batch and run a training step
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:  # report on the test set every `report_steps` batches
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    # configure GPU resources and open the session
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        #with tf.device("/gpu:1"):
        sess.run(init)  # run variable initialization
        if initial_weights is not None:
            sess.run(assign_ops)

        # take the first 50 samples as the test set
        test_xs, test_ys = unzip(list(read_data("syndata/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            # enumerate training batches together with their indices
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()  # process one batch
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print "time for 60 batches {}".format(  # 计算训练60批次(1批batch_size张图片)的用时
                            60 * (last_batch_time - batch_time) /
                            (last_batch_idx - batch_idx))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:  # on keyboard interrupt, save the current weights to weights.npz
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
Example #14
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    x, y, params = model.get_training_model()
    y_ = tf.placeholder(tf.float32, [None, 7 * len(constants.CHARS) + 1])
    digits_loss = tf.nn.softmax_cross_entropy_with_logits(
        logits=tf.reshape(y[:, 1:], [-1, len(constants.CHARS)]),
        labels=tf.reshape(y_[:, 1:], [-1, len(constants.CHARS)]))
    digits_loss = tf.reduce_sum(digits_loss)
    presence_loss = 10. * tf.nn.sigmoid_cross_entropy_with_logits(
        logits=y[:, :1], labels=y_[:, :1])
    presence_loss = tf.reduce_sum(presence_loss)
    cross_entropy = digits_loss + presence_loss
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(cross_entropy)
    
    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(constants.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(constants.CHARS)]), 2)
    
    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]
        
    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
    
    def vec_to_number(v):
        return "".join(constants.CHARS[i] for i in v)
        
    def do_report():
        r = sess.run([best, correct, tf.greater(y[:, 0], 0),
                      y_[:, 0], digits_loss, presence_loss, cross_entropy],
                        feed_dict={x: test_xs, y_: test_ys})
        num_correct = numpy.sum(numpy.logical_or(
            numpy.all(r[0] == r[1], axis=1),
            numpy.logical_and(r[2] < 0.5, r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print("{} {} <-> {} {}".format(vec_to_number(c), pc,
                                            vec_to_number(b), float(pb)))
        num_p_correct = numpy.sum(r[2] == r[3])
        
        print("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
                "(digits: {}, presence: {}) |{}|".format(
                batch_idx, 100. * num_correct / (len(r[0])),
                100. * num_p_correct / len(r[2]), r[6], r[4], r[5],
                "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                    for b, c, pb, pc in zip(*r_short))))
                        
    def do_batch():
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()
            
    with tf.Session() as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(list(read_data("test/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print("time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                                            (last_batch_idx - batch_idx)))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
Example #15
def train(starter_learning_rate, report_steps, batch_size, initial_weights=None):
    """
    Train the network.
    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.
    :param starter_learning_rate:
        Initial learning rate; it decays exponentially during training.
    :param report_steps:
        Every `report_steps` batches a progress report is printed.
    :param batch_size:
        The size of the batches used for training.
    :param initial_weights:
        (Optional.) Weights to initialize the network with.
    :return:
        The learned network weights.
    """

    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32,
                        [None, 10 * len(CHARS) + 1])  #7 changed to 10
    #y_ = tf.placeholder(tf.float32, [None, 729 ]) #7 changed to 10
    #y_ = tf.placeholder(tf.int32, [None, 10 * len(CHARS) + 1]) #7 changed to 10
    #y_ = tf.one_hot( y_ , 621 )
    #y_ = tf.squeeze( y_ , 0)

    digits_loss, presence_loss, loss = get_loss(y, y_)
    #train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    global_step = tf.Variable(0, trainable=False)
    #starter_learning_rate = 0.1
    learn_rate = tf.train.exponential_decay(starter_learning_rate,
                                            global_step,
                                            1000,
                                            0.96,
                                            staircase=True)

    train_step = (tf.train.AdamOptimizer(learn_rate).minimize(
        loss, global_step=global_step))

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 10, len(CHARS)]),
                     2)  # 7 changed to 10
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 10, len(CHARS)]),
                        2)  #7 changed to 10

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

    def vec_to_plate(v):
        return "".join(CHARS[i] for i in v)

    def do_report():
        r = sess.run([
            best, correct,
            tf.greater(y[:, 0], 0), y_[:, 0], digits_loss, presence_loss, loss
        ],
                     feed_dict={
                         x: test_xs,
                         y_: test_ys
                     })
        num_correct = numpy.sum(
            numpy.logical_or(numpy.all(r[0] == r[1], axis=1),
                             numpy.logical_and(r[2] < 0.5, r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            #print "{} {} <-> {} {}".format(vec_to_plate(c), pc,
            #vec_to_plate(b), float(pb)) # print command needs to be changed according to python3
            print("{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb)))
        num_p_correct = numpy.sum(r[2] == r[3])

        print("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
              "(digits: {}, presence: {}) |{}|".format(
                  batch_idx, 100. * num_correct / (len(r[0])),
                  100. * num_p_correct / len(r[2]), r[6], r[4], r[5],
                  "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                          for b, c, pb, pc in zip(*r_short))))

    def do_batch():
        #batch_xs = numpy.reshape(batch_xs, (None, 621))
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(list(read_data("test/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print("time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                            (last_batch_idx - batch_idx)))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
Example #16
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, len(common.OUTCOMES)])

    loss = get_loss(y, y_)
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    best = tf.argmax(tf.reshape(y[:,:], [-1, 1, len(common.OUTCOMES)]), 2)
    correct = tf.argmax(tf.reshape(y_[:,1:], [-1, 1, len(common.OUTCOMES)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

    def vec_to_result(v):
        return "".join(common.OUTCOMES[i] for i in v)

    def do_report():
        #r = sess.run([best,
        #                correct,
        #                tf.greater(y[:, 0], 0),
        #                y_,
        #                loss],
        #                feed_dict = {x: test_xs, y_: test_ys})
        r = sess.run(y, feed_dict = {x: test_xs, y_: test_ys})
        print('Outcome for testing images: {}'.format(r))

        #num_correct = numpy.sum(r[0] == r[1], axis=0)

        #r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        #for b, c, pb, pc in zip(*r_short):
        #    print("{} {} <-> {} {}".format(vec_to_result(c), pc,
        #                                   vec_to_result(b), float(pb)))

        #num_p_correct = numpy.sum(r[2] == r[3])

        #print ('B{:3d} {:2.02f}% {:02.02f}% loss: {} (digits: {}, presence: {}) |{}|').format(
        #    batch_idx,
        #    100. * num_correct / (len(r[0])),
        #    100. * num_p_correct / len(r[2]),
        #    r[6],
        #    r[4],
        #    r[5],
        #    "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
        #                                   for b, c, pb, pc in zip(*r_short)))
        
    def do_batch():
        # print(batch_xs.shape)
        # print(batch_ys.shape)
        sess.run(train_step,
                 feed_dict={x: batch_xs, y_:batch_ys})
        if batch_idx % report_steps == 0:
            do_report()
    
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(list(read_data("Nodules/Positive/*.jpg", "Nodules/Negative/*.jpg"))[:common.TEST_NUM])		#First some images for testing
        
        try:
            while True:
                last_batch_idx = 0
                last_batch_time = time.time()
                batch_iter = enumerate(read_training_batch(batch_size))

                for batch_idx, (batch_xs, batch_ys) in batch_iter:
                    do_batch()
                    if batch_idx % report_steps == 0:
                        batch_time = time.time()
                        if last_batch_idx != batch_idx:
                            print("time for 60 batches {}".format(
                                60*(last_batch_time - batch_time) /
                                            (last_batch_idx - batch_idx)))
                            last_batch_idx = batch_idx
                            last_batch_time = batch_time
        
        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
Example #17
def prepare(config,
            config_name,
            exp_id,
            train_samples,
            val_samples,
            batch_size,
            epoch=None):

    metrics_id = config_name + "_" + exp_id if exp_id is not None else config_name
    weights_id = config_name + "/" + exp_id if exp_id is not None else config_name

    WEIGHT_DIR = "./" + weights_id
    WEIGHTS_SAVE = 'weights.{epoch:04d}.h5'

    TRAINING_LOG = "./" + metrics_id + ".csv"
    LOGS_DIR = "./logs"

    model = get_training_model(weight_decay,
                               np_branch1=config.paf_layers,
                               np_branch2=config.heat_layers + 1)
    lr_mult = get_lrmult(model)

    # load previous weights or vgg19 if this is the first run
    last_epoch, wfile = get_last_epoch_and_weights_file(
        WEIGHT_DIR, WEIGHTS_SAVE, epoch)
    print("last_epoch:", last_epoch)

    if wfile is not None:
        print("Loading %s ..." % wfile)

        model.load_weights(wfile)

    else:
        print("Loading vgg19 weights...")

        vgg_model = VGG19(include_top=False, weights='imagenet')

        from_vgg = dict()
        from_vgg['conv1_1'] = 'block1_conv1'
        from_vgg['conv1_2'] = 'block1_conv2'
        from_vgg['conv2_1'] = 'block2_conv1'
        from_vgg['conv2_2'] = 'block2_conv2'
        from_vgg['conv3_1'] = 'block3_conv1'
        from_vgg['conv3_2'] = 'block3_conv2'
        from_vgg['conv3_3'] = 'block3_conv3'
        from_vgg['conv3_4'] = 'block3_conv4'
        from_vgg['conv4_1'] = 'block4_conv1'
        from_vgg['conv4_2'] = 'block4_conv2'

        for layer in model.layers:
            if layer.name in from_vgg:
                vgg_layer_name = from_vgg[layer.name]
                layer.set_weights(
                    vgg_model.get_layer(vgg_layer_name).get_weights())
                print("Loaded VGG19 layer: " + vgg_layer_name)

        last_epoch = 0

    # euclidean loss as implemented in caffe https://github.com/BVLC/caffe/blob/master/src/caffe/layers/euclidean_loss_layer.cpp
    def eucl_loss(x, y):
        l = K.sum(K.square(x - y)) / batch_size / 2
        return l

    # learning rate schedule - equivalent of caffe lr_policy =  "step"
    iterations_per_epoch = train_samples // batch_size

    def step_decay(epoch):
        steps = epoch * iterations_per_epoch * batch_size
        lrate = base_lr * math.pow(gamma, math.floor(steps / stepsize))
        print("Epoch:", epoch, "Learning rate:", lrate)
        return lrate

    print("Weight decay policy...")
    for i in range(1, 100, 5):
        step_decay(i)

    # configure callbacks
    lrate = LearningRateScheduler(step_decay)
    checkpoint = ModelCheckpoint(WEIGHT_DIR + '/' + WEIGHTS_SAVE,
                                 monitor='loss',
                                 verbose=0,
                                 save_best_only=False,
                                 save_weights_only=True,
                                 mode='min',
                                 period=1)
    csv_logger = CSVLogger(TRAINING_LOG, append=True)
    tb = TensorBoard(log_dir=LOGS_DIR,
                     histogram_freq=0,
                     write_graph=True,
                     write_images=False)
    tnan = TerminateOnNaN()
    #coco_eval = CocoEval(train_client, val_client)

    callbacks_list = [lrate, checkpoint, csv_logger, tb, tnan]

    # sgd optimizer with lr multipliers
    multisgd = MultiSGD(lr=base_lr,
                        momentum=momentum,
                        decay=0.0,
                        nesterov=False,
                        lr_mult=lr_mult)

    # start training

    model.compile(loss=eucl_loss, optimizer=multisgd)

    return model, iterations_per_epoch, val_samples // batch_size, last_epoch, metrics_id, callbacks_list
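# A hedged sketch of how prepare()'s return values would feed Keras training;
# the train_di / val_di generators and the argument values are assumptions:
model, train_steps, val_steps, last_epoch, metrics_id, callbacks_list = \
    prepare(config, config_name, exp_id, train_samples, val_samples, batch_size)
model.fit_generator(train_di,
                    steps_per_epoch=train_steps,
                    epochs=max_iter,
                    callbacks=callbacks_list,
                    validation_data=val_di,
                    validation_steps=val_steps,
                    initial_epoch=last_epoch + 1)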
Example #18
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    """
    Train the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    :param learn_rate:
        Learning rate to use.

    :param report_steps:
        Every `report_steps` batches a progress report is printed.

    :param batch_size:
        The size of the batches used for training.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    :return:
        The learned network weights.

    """
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])

    digits_loss = tf.nn.softmax_cross_entropy_with_logits(
        logits=tf.reshape(y[:, 1:], [-1, len(common.CHARS)]),
        labels=tf.reshape(y_[:, 1:], [-1, len(common.CHARS)]))
    digits_loss = tf.reduce_sum(digits_loss)
    presence_loss = 10. * tf.nn.sigmoid_cross_entropy_with_logits(
        logits=y[:, :1], labels=y_[:, :1])
    presence_loss = tf.reduce_sum(presence_loss)
    cross_entropy = digits_loss + presence_loss
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(cross_entropy)

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run([best,
                      correct,
                      tf.greater(y[:, 0], 0),
                      y_[:, 0],
                      digits_loss,
                      presence_loss,
                      cross_entropy],
                     feed_dict={x: test_xs, y_: test_ys})
        num_correct = numpy.sum(
                        numpy.logical_or(
                            numpy.all(r[0] == r[1], axis=1),
                            numpy.logical_and(r[2] < 0.5,
                                              r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print("{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb)))
        num_p_correct = numpy.sum(r[2] == r[3])

        print ("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
               "(digits: {}, presence: {}) |{}|").format(
            batch_idx,
            100. * num_correct / (len(r[0])),
            100. * num_p_correct / len(r[2]),
            r[6],
            r[4],
            r[5],
            "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                                           for b, c, pb, pc in zip(*r_short)))

    def do_batch():
        sess.run(train_step,
                 feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(list(read_data("test/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print "time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                                            (last_batch_idx - batch_idx))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
Example #19
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    """
    1 interactively
    2 `KeyboardInterrupt`
    3 learned weights  are saved to `weights.npz`, and also returned.
    """
    x, y, params = model.get_training_model()

    y_ = tf.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])

    digits_loss, presence_loss, loss = get_loss(y, y_)
    train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run([best,
                      correct,
                      tf.greater(y[:, 0], 0),
                      y_[:, 0],
                      digits_loss,
                      presence_loss,
                      loss],
                     feed_dict={x: test_xs, y_: test_ys})
        num_correct = numpy.sum(
                        numpy.logical_or(
                            numpy.all(r[0] == r[1], axis=1),
                            numpy.logical_and(r[2] < 0.5,
                                              r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print("{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb)))
        num_p_correct = numpy.sum(r[2] == r[3])

        print ("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
               "(digits: {}, presence: {}) |{}|").format(
            batch_idx,
            100. * num_correct / (len(r[0])),
            100. * num_p_correct / len(r[2]),
            r[6],
            r[4],
            r[5],
            "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                                           for b, c, pb, pc in zip(*r_short)))

    def do_batch():
        sess.run(train_step,
                 feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(list(read_data("test/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print "time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                                            (last_batch_idx - batch_idx))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights