Example #1
    def select_serial_port(self, action):
        # un-check all menu actions
        for item in self.serial_port_action_list:
            item.setChecked(False)

        # check selected serial port action
        action.setChecked(True)
        self.serial_port = action.text()
        save_value('serial', 'port', self.serial_port)
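
The save_value helper itself is not shown in these examples. For the (section, key, value) form used above, a minimal sketch assuming a PyQt5 QSettings backend might look like the following; the organization and application names are placeholders, not the source's actual values:

from PyQt5.QtCore import QSettings

def save_value(section, key, value):
    # Hypothetical sketch, not the source implementation: persist a single
    # value under "section/key" in Qt's application settings store.
    settings = QSettings('ExampleOrg', 'ExampleApp')  # placeholder names
    settings.setValue('{}/{}'.format(section, key), value)
    settings.sync()  # flush the change to storage immediately

Example #6 at the end of this page calls the same three-argument form to persist macro data.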
Example #2
def train(M, src=None, trg=None, has_disc=True, saver=None, model_name=None):
    """Main training function

    Creates log file, manages datasets, trains model

    M          - (TensorDict) the model
    src        - (obj) source domain. Contains train/test Data obj
    trg        - (obj) target domain. Contains train/test Data obj
    has_disc   - (bool) whether model requires a discriminator update
    saver      - (Saver) saves models during training
    model_name - (str) name of the model being run with relevant params info
    """
    # Training settings
    bs = 64
    iterep = 1000
    itersave = 20000
    n_epoch = 80
    epoch = 0
    feed_dict = {}

    # Create a log directory and FileWriter
    log_dir = os.path.join(args.logdir, model_name)
    delete_existing(log_dir)
    train_writer = tf.summary.FileWriter(log_dir)

    # Create a save directory
    if saver:
        model_dir = os.path.join('checkpoints', model_name)
        delete_existing(model_dir)
        os.makedirs(model_dir)

    # Replace src domain with pseudolabeled trg
    if args.dirt > 0:
        print("Setting backup and updating backup model")
        src = PseudoData(args.trg, trg, M.teacher)
        M.sess.run(M.update_teacher)

        # Sanity check model
        print_list = []
        if src:
            save_value(M.fn_ema_acc,
                       'test/src_test_ema_1k',
                       src.test,
                       train_writer,
                       0,
                       print_list,
                       full=False)

        if trg:
            save_value(M.fn_ema_acc, 'test/trg_test_ema', trg.test,
                       train_writer, 0, print_list)
            save_value(M.fn_ema_acc,
                       'test/trg_train_ema_1k',
                       trg.train,
                       train_writer,
                       0,
                       print_list,
                       full=False)

        print(print_list)

    if src: get_info(args.src, src)
    if trg: get_info(args.trg, trg)
    print("Batch size:", bs)
    print("Iterep:", iterep)
    print("Total iterations:", n_epoch * iterep)
    print("Log directory:", log_dir)

    for i in range(n_epoch * iterep):
        # Run discriminator optimizer
        if has_disc:
            update_dict(M, feed_dict, src, trg, bs)
            summary, _ = M.sess.run(M.ops_disc, feed_dict)
            train_writer.add_summary(summary, i + 1)

        # Run main optimizer
        update_dict(M, feed_dict, src, trg, bs)
        summary, _ = M.sess.run(M.ops_main, feed_dict)
        train_writer.add_summary(summary, i + 1)
        train_writer.flush()

        end_epoch, epoch = tb.utils.progbar(i,
                                            iterep,
                                            message='{}/{}'.format(epoch, i),
                                            display=args.run >= 999)

        # Update pseudolabeler
        if args.dirt and (i + 1) % args.dirt == 0:
            print "Updating teacher model"
            M.sess.run(M.update_teacher)

        # Log end-of-epoch values
        if end_epoch:
            print_list = M.sess.run(M.ops_print, feed_dict)

            if src:
                save_value(M.fn_ema_acc,
                           'test/src_test_ema_1k',
                           src.test,
                           train_writer,
                           i + 1,
                           print_list,
                           full=False)

            if trg:
                save_value(M.fn_ema_acc, 'test/trg_test_ema', trg.test,
                           train_writer, i + 1, print_list)
                save_value(M.fn_ema_acc,
                           'test/trg_train_ema_1k',
                           trg.train,
                           train_writer,
                           i + 1,
                           print_list,
                           full=False)

            print_list += ['epoch', epoch]
            print(print_list)

        if saver and (i + 1) % itersave == 0:
            save_model(saver, M, model_dir, i + 1)

    # Saving final model
    if saver:
        save_model(saver, M, model_dir, i + 1)
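
The training examples call a different, summary-logging save_value with the signature (fn, tag, data, writer, step, print_list, full). A minimal sketch under assumed semantics; the metric-function interface and the 1k-subsample reading of full=False are guesses suggested by the *_1k tags, not the source's actual code:

import tensorflow as tf

def save_value(fn, tag, data, train_writer, global_step, print_list, full=True):
    # Hypothetical sketch: evaluate a metric on a dataset (optionally a 1k
    # subsample, matching the *_1k tags), log it as a TensorBoard scalar,
    # record it for printing, and return it.
    value = fn(data, None if full else 1000)  # assumed metric signature
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    train_writer.add_summary(summary, global_step)
    print_list.extend([tag, value])
    return value  # Example #5 uses the return value to track best accuracy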
Example #3
def train(M, src=None, trg=None, saver=None, model_name=None):
    """Main training function
    Creates log file, manages datasets, trains model
    M          - (TensorDict) the model
    src        - (obj) source domain. Contains train/test Data obj
    trg        - (obj) target domain. Contains train/test Data obj
    saver      - (Saver) saves models during training
    model_name - (str) name of the model being run with relevant params info
    """
    # Training settings
    bs = 64
    iterep = 1000
    iterviz = 5000 if args.run < 999 else 1000
    itersave = 20000
    n_epoch = 200
    epoch = 0
    feed_dict = {}

    # Create a log directory and FileWriter
    log_dir = os.path.join(args.logdir, model_name)
    delete_existing(log_dir)
    train_writer = tf.summary.FileWriter(log_dir)

    # Create a save directory
    if saver:
        model_dir = os.path.join('checkpoints', model_name)
        delete_existing(model_dir)
        os.makedirs(model_dir)

    if src: get_info(args.src, src)
    if trg: get_info(args.trg, trg)
    print "Batch size:", bs
    print "Iterep:", iterep
    print "Total iterations:", n_epoch * iterep
    print "Log directory:", log_dir

    for i in range(n_epoch * iterep):
        # Run main optimizer
        update_dict(M, feed_dict, src, trg, bs)
        summary, _ = M.sess.run(M.ops_main, feed_dict)
        train_writer.add_summary(summary, i + 1)
        train_writer.flush()

        end_epoch, epoch = tb.utils.progbar(i, iterep,
                                            message='{}/{}'.format(epoch, i),
                                            display=args.run >= 999)

        # Log end-of-epoch values
        if end_epoch:
            print_list = M.sess.run(M.ops_print, feed_dict)

            if src:
                save_value(M.fn_loss, 'test/src_loss',
                           src.test,  train_writer, i + 1, print_list, full=False)

            if trg:
                save_value(M.fn_loss, 'test/trg_loss',
                           trg.test,  train_writer, i + 1, print_list, full=False)

            print_list += ['epoch', epoch]
            print(print_list)

        # Visualize images
        if (i + 1) % iterviz == 0 and getattr(M, 'ops_image', None) is not None:
            summary = M.sess.run(M.ops_image)
            train_writer.add_summary(summary, i + 1)

        if saver and (i + 1) % itersave == 0:
            save_model(saver, M, model_dir, i + 1)

    # Saving final model
    if saver:
        save_model(saver, M, model_dir, i + 1)
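
Every example clears its log and checkpoint directories with delete_existing before writing. A plausible sketch of that helper, assuming it simply removes any existing directory tree:

import os
import shutil

def delete_existing(path):
    # Hypothetical sketch: remove the directory tree if it exists, so each
    # run starts from a clean log/checkpoint directory.
    if os.path.exists(path):
        shutil.rmtree(path)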
Example #4
def train(M, src=None, trg=None, has_disc=True, saver=None, model_name=None):
    """Main training function

    Creates log file, manages datasets, trains model

    M          - (TensorDict) the model
    src        - (obj) source domain. Contains train/test Data obj
    trg        - (obj) target domain. Contains train/test Data obj
    has_disc   - (bool) whether model requires a discriminator update
    saver      - (Saver) saves models during training
    model_name - (str) name of the model being run with relevant params info
    """
    # Training settings
    bs = 64
    iterep = 1000
    itersave = 20000
    n_epoch = 80
    epoch = 0
    feed_dict = {}

    # Create a log directory and FileWriter
    log_dir = os.path.join(args.logdir, model_name)
    delete_existing(log_dir)
    train_writer = tf.summary.FileWriter(log_dir)

    # Create a save directory
    if saver:
        model_dir = os.path.join('checkpoints', model_name)
        delete_existing(model_dir)
        os.makedirs(model_dir)

    # Replace src domain with pseudolabeled trg
    if args.dirt > 0:
        print "Setting backup and updating backup model"
        src = PseudoData(args.trg, trg, M.teacher)
        M.sess.run(M.update_teacher)

        # Sanity check model
        print_list = []
        if src:
            save_value(M.fn_ema_acc, 'test/src_test_ema_1k',
                       src.test,  train_writer, 0, print_list, full=False)

        if trg:
            save_value(M.fn_ema_acc, 'test/trg_test_ema',
                       trg.test,  train_writer, 0, print_list)
            save_value(M.fn_ema_acc, 'test/trg_train_ema_1k',
                       trg.train, train_writer, 0, print_list, full=False)

        print(print_list)

    if src: get_info(args.src, src)
    if trg: get_info(args.trg, trg)
    print "Batch size:", bs
    print "Iterep:", iterep
    print "Total iterations:", n_epoch * iterep
    print "Log directory:", log_dir

    for i in range(n_epoch * iterep):
        # Run discriminator optimizer
        if has_disc:
            update_dict(M, feed_dict, src, trg, bs)
            summary, _ = M.sess.run(M.ops_disc, feed_dict)
            train_writer.add_summary(summary, i + 1)

        # Run main optimizer
        update_dict(M, feed_dict, src, trg, bs)
        summary, _ = M.sess.run(M.ops_main, feed_dict)
        train_writer.add_summary(summary, i + 1)
        train_writer.flush()

        end_epoch, epoch = tb.utils.progbar(i, iterep,
                                            message='{}/{}'.format(epoch, i),
                                            display=args.run >= 999)

        # Update pseudolabeler
        if args.dirt and (i + 1) % args.dirt == 0:
            print "Updating teacher model"
            M.sess.run(M.update_teacher)

        # Log end-of-epoch values
        if end_epoch:
            print_list = M.sess.run(M.ops_print, feed_dict)

            if src:
                save_value(M.fn_ema_acc, 'test/src_test_ema_1k',
                           src.test,  train_writer, i + 1, print_list, full=False)

            if trg:
                save_value(M.fn_ema_acc, 'test/trg_test_ema',
                           trg.test,  train_writer, i + 1, print_list)
                save_value(M.fn_ema_acc, 'test/trg_train_ema_1k',
                           trg.train, train_writer, i + 1, print_list, full=False)

            print_list += ['epoch', epoch]
            print(print_list)

        if saver and (i + 1) % itersave == 0:
            save_model(saver, M, model_dir, i + 1)

    # Saving final model
    if saver:
        save_model(saver, M, model_dir, i + 1)
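
update_dict refreshes the feed_dict with fresh minibatches before each optimizer step. A sketch under assumed interfaces; the next_batch method and the placeholder names on M are hypothetical, not the source's actual API:

def update_dict(M, feed_dict, src=None, trg=None, bs=64):
    # Hypothetical sketch: overwrite feed_dict in place with the next batch
    # from each domain that is present.
    if src:
        src_x, src_y = src.train.next_batch(bs)
        feed_dict.update({M.src_x: src_x, M.src_y: src_y})
    if trg:
        trg_x, trg_y = trg.train.next_batch(bs)
        feed_dict.update({M.trg_x: trg_x, M.trg_y: trg_y})

Example #5 below additionally passes noise=True, presumably to populate a noise placeholder for the generator as well.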
Example #5
def train(M, src=None, trg=None, has_disc=True, saver=None, model_name=None):
    """Main training function

    Creates log file, manages datasets, trains model

    M          - (TensorDict) the model
    src        - (obj) source domain. Contains train/test Data obj
    trg        - (obj) target domain. Contains train/test Data obj
    has_disc   - (bool) whether model requires a discriminator update
    saver      - (Saver) saves models during training
    model_name - (str) name of the model being run with relevant params info
    """
    # Training settings
    iterep = 1000
    itersave = 20000
    n_epoch = 200
    epoch = 0
    feed_dict = {}

    # Create a log directory and FileWriter
    log_dir = os.path.join(args.logdir, model_name)
    delete_existing(log_dir)
    train_writer = tf.summary.FileWriter(log_dir)

    # Create a directory to save generated images
    gen_img_path = os.path.join(args.gendir, model_name)
    delete_existing(gen_img_path)
    os.makedirs(gen_img_path)

    # Create a save directory
    if saver:
        model_dir = os.path.join('checkpoints', model_name)
        delete_existing(model_dir)
        os.makedirs(model_dir)

    # Replace src domain with pseudolabeled trg
    if args.dirt > 0:
        print "Setting backup and updating backup model"
        src = PseudoData(args.trg, trg, M.teacher)
        M.sess.run(M.update_teacher)

        # Sanity check model
        print_list = []
        if src:
            save_value(M.fn_ema_acc,
                       'test/src_test_ema_1k',
                       src.test,
                       train_writer,
                       0,
                       print_list,
                       full=False)

        if trg:
            save_value(M.fn_ema_acc, 'test/trg_test_ema', trg.test,
                       train_writer, 0, print_list)
            save_value(M.fn_ema_acc,
                       'test/trg_train_ema_1k',
                       trg.train,
                       train_writer,
                       0,
                       print_list,
                       full=False)

        print(print_list)

    if src: get_info(args.src, src)
    if trg: get_info(args.trg, trg)
    print "Batch size:", args.bs
    print "Iterep:", iterep
    print "Total iterations:", n_epoch * iterep
    print "Log directory:", log_dir

    best_acc = -1.
    trg_acc = -1.
    for i in range(n_epoch * iterep):
        if has_disc:
            # Run discriminator optimizer
            update_dict(M, feed_dict, src, trg, args.bs)
            summary, _ = M.sess.run(M.ops_disc, feed_dict)
            train_writer.add_summary(summary, i + 1)

            # Run generator optimizer
            update_dict(M, feed_dict, None, trg, args.bs, noise=True)
            summary, _ = M.sess.run(M.ops_gen, feed_dict)
            train_writer.add_summary(summary, i + 1)

        # Run main optimizer
        update_dict(M, feed_dict, src, trg, args.bs, noise=True)
        summary, _ = M.sess.run(M.ops_main, feed_dict)
        train_writer.add_summary(summary, i + 1)
        train_writer.flush()

        end_epoch, epoch = tb.utils.progbar(i,
                                            iterep,
                                            message='{}/{}'.format(epoch, i),
                                            display=args.run >= 999)

        # Update pseudolabeler
        if args.dirt and (i + 1) % args.dirt == 0:
            print "Updating teacher model"
            M.sess.run(M.update_teacher)

        if (i + 1) % iterep == 0:
            gen_imgs = M.sess.run(M.trg_gen_x, feed_dict)
            manifold_h = int(np.floor(np.sqrt(args.bs)))
            manifold_w = int(np.floor(np.sqrt(args.bs)))
            visualize_results(
                gen_imgs, [manifold_h, manifold_w],
                os.path.join(gen_img_path, 'epoch_{}.png'.format(
                    (i + 1) // iterep)))  # integer division keeps the name clean

        # Log end-of-epoch values
        if end_epoch:
            print_list = M.sess.run(M.ops_print, feed_dict)

            if src:
                save_value(M.fn_ema_acc,
                           'test/src_test_ema_1k',
                           src.test,
                           train_writer,
                           i + 1,
                           print_list,
                           full=False)

            if trg:
                trg_acc = save_value(M.fn_ema_acc, 'test/trg_test_ema',
                                     trg.test, train_writer, i + 1, print_list)
                save_value(M.fn_ema_acc,
                           'test/trg_train_ema_1k',
                           trg.train,
                           train_writer,
                           i + 1,
                           print_list,
                           full=False)

            print_list += ['epoch', epoch]
            print(print_list)

        if saver and trg_acc > best_acc:
            print("Saving new best model")
            saver.save(M.sess, os.path.join(model_dir, 'model_best'))
            best_acc = trg_acc

    # Saving final model
    if saver:
        save_model(saver, M, model_dir, i + 1)
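
Periodic and final checkpoints go through save_model. A minimal sketch consistent with the saver.save call used for the best model above; the checkpoint file name is an assumption:

import os

def save_model(saver, M, model_dir, global_step):
    # Hypothetical sketch: write a step-numbered checkpoint for the session.
    path = saver.save(M.sess,
                      os.path.join(model_dir, 'model'),
                      global_step=global_step)
    print("Saved model to {}".format(path))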
Example #6
    def save(self):
        name = self.parent.saveMacroNameLineEdit.text()
        save_value('macro', name, self.macroData)