Example #1
    def __init__(self, path):
        BaseConfig.__init__(self, path)
        # QObject.__init__()

        self.fileHash = hashlib.md5(str(self.keys).encode('utf-8')).hexdigest()
        self.watcher = QFileSystemWatcher([path])
        self.watcher.fileChanged.connect(self._on_file_changed)
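One caveat on Example #1: QFileSystemWatcher drops a file from its watch list once the file is deleted, and many editors save "atomically" by writing a temporary file and renaming it over the original, which is why Example #2 below watches the parent directory as well. A minimal sketch of a handler that re-arms the watch; the re-add logic and the _reload helper are assumptions, not part of the original snippet, and os is assumed imported:

    def _on_file_changed(self, path):
        # Atomic saves replace the inode, after which QFileSystemWatcher
        # silently stops tracking the file; re-add it if it still exists.
        if os.path.exists(path) and path not in self.watcher.files():
            self.watcher.addPath(path)
        self._reload()  # hypothetical reload hook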
Example #2
    def __init__(self, path):
        BaseConfig.__init__(self, CONFIG_PATH)
        # QObject.__init__()

        self.watcher = QFileSystemWatcher([os.path.dirname(path), path])
        self.watcher.fileChanged.connect(self._on_file_changed)
        self.watcher.directoryChanged.connect(self._on_file_changed)
Example #3
    def __init__(self):
        BaseConfig.__init__(self, config_path)
        # GObject.GObject.__init__()

        # TODO: Gio's monitoring is a bit slow
        f = Gio.File.new_for_path(config_path)
        self.monitor = f.monitor_file(Gio.FileMonitorFlags.NONE, None)
        self.monitor.connect("changed", self._on_file_changed)
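In Example #3 the first argument to monitor_file is spelled out as Gio.FileMonitorFlags.NONE (a literal 0 in the original). The GFileMonitor "changed" handler receives the monitor, the file, an optional "other" file, and an event type; a plausible sketch, with _reload as a hypothetical helper:

    def _on_file_changed(self, monitor, file, other_file, event_type):
        # CHANGES_DONE_HINT fires once a burst of writes has settled,
        # which avoids reloading on every partial write.
        if event_type == Gio.FileMonitorEvent.CHANGES_DONE_HINT:
            self._reload()  # hypothetical reload hook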
Example #4
    def __init__(self, filename, root_dir, mod_dir_name):
        self.filename = filename
        self.root_dir = root_dir
        self.mod_dir_name = mod_dir_name
        # The directory that contains this config file
        self.mod_dir = os.path.abspath(os.path.join(self.filename, ".."))
        with open(filename, encoding="utf-8") as file:
            self.json = json.load(file)
        BaseConfig.__init__(self, self.json)
Example #5
    def test_empty_file(self):
        # If the file is empty then BaseConfig should write
        # some default value in there
        config = BaseConfig(path=self.config_file.name)
        content = self.config_file.file.read()

        assert len(content) != 0
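The test examples here and below rely on a self.config_file fixture that the excerpts omit. A minimal sketch of what the setUp might look like, using tempfile; the attribute usage (.name, .file) mirrors the tests, but the fixture itself is an assumption:

import tempfile
import unittest

class BaseConfigTest(unittest.TestCase):
    def setUp(self):
        # A named temp file: tests pass .name to BaseConfig and read the
        # written contents back through the underlying .file object.
        self.config_file = tempfile.NamedTemporaryFile(mode="w+")

    def tearDown(self):
        self.config_file.close()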
Example #6
    def init_config(self):
        self.config = BaseConfig('model2')
        self.config.wit_hook = False
        self.config.n_epoch_eval = 1
        self.config.max_steps = 10
        self.config.shuffle_and_repeat = False
        self.config.n_batch_train = 25  # for train
        self.config.n_epoch_train = 25  # for train
Example #7
    def test_file_missing_key(self):
        # BaseConfig should use default_config.json to
        # fill in missing config keys from the user config file
        
        self.config_file.file.write("{}")
        self.config_file.file.flush()
        config = BaseConfig(path=self.config_file.name)

        # Should not raise an exception here
        config["auto-capitalize-expansion"]
Example #8
    def test_telex(self):
        """
        It should pass telex-specific options to the definition generator.
        """
        c = BaseConfig(path=self.config_file.name)
        c["telex-w-shorthand"] = False
        c["telex-brackets-shorthand"] = False

        d = c["input-method-definition"]
        assert not "<ư" in d["w"]
        assert not "[" in d
Example #9
    def init_config(self):
        self.config = BaseConfig('model')
        self.config.wit_hook = True
        self.config.n_epoch_eval = 1
        self.config.max_steps = 100000
        self.config.shuffle_and_repeat = False
        self.config.n_batch_train = 25  # for train
        self.config.n_epoch_train = 25  # for train

        self.config.save_checkpoints_secs = 10
        self.config.stop_if_no_increase_hook_max_steps_without_increase = 1000
        self.config.stop_if_no_increase_hook_min_steps = 3500
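Hypothetical wiring (not shown in the excerpt): the two stop_if_no_increase fields line up with the parameters of TF 1.x's built-in early-stopping hook, so they are presumably forwarded to something like the sketch below; the estimator and the metric name are assumptions:

import tensorflow as tf

def make_early_stopping_hook(estimator, config):
    # Assumed: forward the two config fields to TF's early-stopping hook,
    # which stops training once the metric plateaus.
    return tf.estimator.experimental.stop_if_no_increase_hook(
        estimator,
        metric_name='accuracy',  # assumed metric
        max_steps_without_increase=config.stop_if_no_increase_hook_max_steps_without_increase,
        min_steps=config.stop_if_no_increase_hook_min_steps)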
Example #10
    def test_input_method_definition(self):
        """
        It should generate input method definition based on the current
        input method.
        """

        c = BaseConfig(path=self.config_file.name)
        d = c["input-method-definition"]
        assert len(d) != 0

        c["input-method"] = "vni"
        d = c["input-method-definition"]
        assert len(d) != 0
Example #11
def import_build_config(make_file, source, destination):
    global root_files
    root_files.append("build.config")

    build_config = os.path.join(source, "build.config")
    if not os.path.isfile(build_config):
        exit("unable to read build.config")
    with open(build_config, "r", encoding="utf-8") as config_file:
        config_obj = json.loads(config_file.read())
        config = BaseConfig(config_obj)
        make_file["global"]["api"] = config.get_value("defaultConfig.api",
                                                      "CoreEngine")

        src_dir = os.path.join(destination, "src")

        # clear assets folder
        assets_dir = os.path.join(src_dir, "assets")
        clear_directory(assets_dir)
        os.makedirs(assets_dir)

        # some pre-defined resource folders
        resources = [{
            "path": "src/assets/resource_packs/*",
            "type": "minecraft_resource_pack"
        }, {
            "path": "src/assets/behavior_packs/*",
            "type": "minecraft_behavior_pack"
        }]

        os.makedirs(os.path.join(assets_dir, "resource_packs"))
        os.makedirs(os.path.join(assets_dir, "behavior_packs"))

        # import assets
        for res_dir in config.get_filtered_list("resources", "resourceType",
                                                ("resource", "gui")):
            if res_dir["resourceType"] == "resource":
                res_dir["resourceType"] = "resource_directory"
            path_stripped = res_dir["path"].strip('/')
            path_parts = path_stripped.split('/')
            path = os.path.join(*path_parts)
            copy_directory(os.path.join(source, path),
                           os.path.join(assets_dir, path), True)
            resources.append({
                "path": "src/assets/" + path_stripped,
                "type": res_dir["resourceType"]
            })

            root_files.append(path_parts[0])

        make_file["resources"] = resources

        # clear libraries folder and copy libraries from the old project
        libs_dir = os.path.join(destination, "src", "lib")
        clear_directory(libs_dir)
        clear_directory(os.path.join(destination, "src", "dev"))
        os.makedirs(libs_dir)
        old_libs = config.get_value("defaultConfig.libraryDir",
                                    "lib").strip('/')
        old_libs_parts = old_libs.split('/')
        old_libs_dir = os.path.join(source, *old_libs_parts)
        if os.path.isdir(old_libs_dir):
            root_files.append(old_libs_parts[0])
            copy_directory(old_libs_dir, libs_dir)

        # some pre-defined source folders
        sources = [{
            "source": "src/lib/*",
            "type": "library",
            "language": "javascript"
        }, {
            "source": "src/preloader/*",
            "type": "preloader",
            "language": "javascript"
        }]

        ensure_directory(os.path.join(src_dir, "preloader"))

        # import sources
        for source_dir in config.get_filtered_list("compile", "sourceType",
                                                   ("mod", "launcher")):
            if source_dir["sourceType"] == "mod":
                source_dir["sourceType"] = "main"

            sourceObj = {
                "type": source_dir["sourceType"],
                "language": "javascript"
            }

            source_parts = source_dir["path"].split('/')
            root_files.append(source_parts[0])

            # One-element tuple: without the trailing comma, `in` against a
            # bare string would do substring matching instead.
            build_dirs = config.get_filtered_list("buildDirs", "targetSource",
                                                  (source_dir["path"],))
            if len(build_dirs) > 0:
                old_build_path = build_dirs[0]["dir"].strip("/")
                old_path_parts = old_build_path.split('/')
                sourceObj["source"] = "src/" + old_build_path
                sourceObj["target"] = source_dir["path"]
                root_files.append(old_path_parts[0])

                copy_directory(os.path.join(source, *old_path_parts),
                               os.path.join(src_dir, *old_path_parts), True)

            else:
                sourceObj["source"] = "src/" + source_dir["path"]
                copy_file(os.path.join(source, *source_parts),
                          os.path.join(src_dir, *source_parts))

            sources.append(sourceObj)

        make_file["sources"] = sources
Example #12
def main(argv):

    cfg = BaseConfig().parse(argv)
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu

    img_generator_class = locate(cfg.db_tuple_loader)
    args = dict()
    args['db_path'] = cfg.db_path
    args['tuple_loader_queue_size'] = cfg.tuple_loader_queue_size
    args['preprocess_func'] = cfg.preprocess_func
    args['batch_size'] = cfg.batch_size
    args['shuffle'] = False
    args['img_size'] = const.max_frame_size
    args['gen_hot_vector'] = True
    args['csv_file'] = cfg.train_csv_file
    train_iter = img_generator_class(args)

    args['csv_file'] = cfg.test_csv_file
    val_iter = img_generator_class(args)

    train_imgs, train_lbls = train_iter.imgs_and_lbls()
    val_imgs, val_lbls = val_iter.imgs_and_lbls()

    # Where to save the trained model
    save_model_dir = cfg.checkpoint_dir
    model_basename = os.path.basename(save_model_dir)
    touch_dir(save_model_dir)


    ## Log experiment
    args_file = os.path.join(cfg.checkpoint_dir, 'args.json')
    with open(args_file, 'w') as f:
        json.dump(vars(cfg), f, ensure_ascii=False, indent=2, sort_keys=True)

    log_file = os.path.join(cfg.checkpoint_dir, cfg.log_filename + '.txt')
    os_utils.touch_dir(cfg.checkpoint_dir)

    logger = log_utils.create_logger(log_file)


    with tf.Graph().as_default():

        # Create the train and val datasets following the tensorflow Dataset API.
        ## A dataset element holds an image and its label.
        train_dataset = TensorflowTupleLoader(train_imgs, train_lbls, cfg, is_training=True).dataset
        val_dataset = TensorflowTupleLoader(val_imgs, val_lbls, cfg, is_training=False, batch_size=cfg.batch_size,
                                            repeat=False).dataset

        handle = tf.placeholder(tf.string, shape=[])

        iterator = tf.data.Iterator.from_string_handle(
            handle, train_dataset.output_types, train_dataset.output_shapes)
        images_ph, lbls_ph = iterator.get_next()

        training_iterator = train_dataset.make_one_shot_iterator()
        validation_iterator = val_dataset.make_initializable_iterator()

        ## Load a pretrained network (resnet_v2 or densenet161) selected by cfg.network_name
        network_class = locate(cfg.network_name)
        model = network_class(cfg, is_training=True, images_ph=images_ph, lbls_ph=lbls_ph)


        trainable_vars = tf.trainable_variables()
        if cfg.caffe_iter_size > 1:  ## Accumulated gradients
            ## Shadow variables with the same shapes as the trainable ones,
            ## initialized to zeros; gradients are accumulated into them.
            accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in trainable_vars]
            zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in accum_vars]

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):

            global_step = tf.Variable(0, name='global_step', trainable=False)
            learning_rate = tf_utils.poly_lr(global_step, cfg)
            optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)

            if cfg.caffe_iter_size > 1:  ## Accumulated gradients

                grads = optimizer.compute_gradients(model.train_loss, trainable_vars)
                # Add each gradient into its shadow accumulator
                # (accum_vars and grads are in the same order).
                accum_ops = [accum_vars[i].assign_add(gv[0]) for i, gv in enumerate(grads)]
                iter_size = cfg.caffe_iter_size
                # Apply the averaged accumulated gradients and update the variables.
                train_op = optimizer.apply_gradients([(accum_vars[i] / iter_size, gv[1]) for i, gv in enumerate(grads)],
                                                     global_step=global_step)

            else: # If accumulated gradient disabled, do regular training

                grads = optimizer.compute_gradients(model.train_loss)
                train_op = optimizer.apply_gradients(grads, global_step=global_step)



        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        training_handle = sess.run(training_iterator.string_handle())
        validation_handle = sess.run(validation_iterator.string_handle())


        start_iter = 1  # no resume in this code version

        saver = tf.train.Saver()  # saves variables learned during training

        ckpt_file = os.path.join(save_model_dir, cfg.checkpoint_filename)
        print('Model Path ', ckpt_file)



        load_model_msg = model.load_model(save_model_dir, ckpt_file, sess, saver, is_finetuning=True)
        logger.info(load_model_msg)


        val_loss = tf.summary.scalar('Val_Loss', model.val_loss)
        val_acc_op = tf.summary.scalar('Batch_Val_Acc', model.val_accuracy)
        model_acc_op = tf.summary.scalar('Split_Val_Accuracy', model.val_accumulated_accuracy)

        logger.info('Start Training ***********')
        best_acc = 0
        best_model_step = 0
        for current_iter in range(start_iter, cfg.train_iters+1):
            start_time_train = time.time()
            feed_dict = {handle: training_handle}

            ## Here is where training and backpropagation start

            # If accumulated gradients are enabled (cfg.caffe_iter_size > 1),
            # run the first iter_size - 1 mini-batches as accumulation-only steps.
            for mini_batch in range(cfg.caffe_iter_size - 1):
                sess.run(accum_ops, feed_dict)


            model_loss_value, accuracy_value, _ = sess.run([model.train_loss, model.train_accuracy, train_op],
                                                           feed_dict)

            # In case accumulated gradient enabled, reset shadow variables
            if cfg.caffe_iter_size > 1:
                sess.run(zero_ops)

            ## Here is where training and backpropagation end

            train_time = time.time() - start_time_train


            if current_iter % cfg.logging_threshold == 0 or current_iter == 1:
                logger.info(
                    'i {0:04d} loss {1:4f} Acc {2:2f} Batch Time {3:3f}'.format(current_iter, model_loss_value, accuracy_value,
                                                                                train_time))

                if current_iter % cfg.test_interval == 0:
                    tf.local_variables_initializer().run()
                    sess.run(validation_iterator.initializer)

                    while True:
                        try:
                            feed_dict = {handle: validation_handle}
                            val_loss_op, batch_accuracy, accuracy_op, _val_acc_op, _val_acc, c_cnf_mat = sess.run(
                                [val_loss, model.val_accuracy, model_acc_op, val_acc_op, model.val_accumulated_accuracy,
                                 model.val_confusion_mat], feed_dict)
                        except tf.errors.OutOfRangeError:
                            logger.info('Val Acc {0}'.format(_val_acc))
                            break





                    if current_iter % cfg.logging_threshold == 0:
                        saver.save(sess, ckpt_file)
                        if best_acc < _val_acc:
                            saver.save(sess, ckpt_file + 'best')
                            best_acc = _val_acc
                            best_model_step = current_iter
                        ## Early-stopping-style bookkeeping: track the best model so far.
                        logger.info('Best Acc {0} at {1} == {2}'.format(best_acc, best_model_step, model_basename))

        saver.save(sess, ckpt_file)  ## Save final ckpt before closing
        sess.close()
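tf_utils.poly_lr is not shown in this example. Polynomial learning-rate decay is conventionally a thin wrapper over TF 1.x's built-in schedule, so a plausible sketch looks like this; cfg.train_iters is used in the example above, while cfg.base_lr and the power are assumptions:

import tensorflow as tf

def poly_lr(global_step, cfg, power=2.0):
    # lr = base_lr * (1 - step / train_iters) ** power, decayed to zero.
    return tf.train.polynomial_decay(cfg.base_lr, global_step,
                                     cfg.train_iters, end_learning_rate=0.0,
                                     power=power)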
Example #13
push_config = {}
currency_config = {}
ggzj_config = {}
awake_config = {}
skill_peerless_effect_config = {}
skill_peerless_grade_config = {}
guild_task_config = {}
guild_skill_config = {}
features_open_config = {}
stage_show_config = {}

all_config_name = {
    'activity_type_config': ActivityTypeConfig(),
    'lottery_config': LotteryConfig(),
    'travel_item_group_config': TravelItemGroupConfig(),
    'base_config': BaseConfig(),
    'hero_config': HeroConfig(),
    'hero_exp_config': HeroExpConfig(),
    'item_config': ItemsConfig(),
    'small_bag_config': SmallBagsConfig(),
    'big_bag_config': BigBagsConfig(),
    'equipment_config': EquipmentConfig(),
    'equipment_strengthen_config': EquipmentStrengthenConfig(),
    'set_equipment_config': SetEquipmentConfig(),
    'equipment_attribute_config': EquipmentAttributeConfig(),
    'chip_config': ChipConfig(),
    'shop_config': ShopConfig(),
    'shop_type_config': ShopTypeConfig(),
    'link_config': LinkConfig(),
    'stage_config': StageConfig(),
    'monster_config': MonsterConfig(),