import tqdm


def produce_semantic_neighbourhood(config_file):
    """Produce semantic word neighbourhoods from a semantic resource."""
    try:
        config = settings.Config(config_file)
        config.misc.independent_component = True
        config.misc.deserialization_allowed = False
        config.spreading_activation = config.spreading_activation[0], 0.5
        semres = SemanticResource.create(config)
    except Exception:
        print("Problematic configuration in {}".format(config_file))
        raise

    all_concepts = semres.get_all_available_concepts()
    info("Got a total of {} concepts".format(semres.name))
    neighbours = {}
    info("Getting semantic neighbours for up to {} steps ".format(semres.spread_steps))
    with tqdm.tqdm(total=len(all_concepts), ascii=True) as pbar:
        for concept in all_concepts:
            sr = semres.spread_activation([concept], semres.spread_steps, 1)
            if sr:
                clear_name = semres.get_clear_concept_word(concept)
                if clear_name not in neighbours:
                    neighbours[clear_name] = {}
                items = list(sr.items())
                step_index = 0
                while items:
                    max_w = max(items, key=lambda x: x[1])[1]
                    idxs = [i for i in range(len(items)) if items[i][1] == max_w]
                    if step_index not in neighbours[clear_name]:
                        neighbours[clear_name][step_index] = []
                    neighbours[clear_name][step_index].extend([semres.get_clear_concept_word(items[i][0]) for i in idxs])
                    items = [items[j] for j in range(len(items)) if j not in idxs]
                    step_index += 1
            pbar.update()

    # write marginal
    for step in range(semres.spread_steps):
        outfile = "{}.neighbours.step_{}.txt".format(semres.base_name, step + 1)
        info("Writing to {}".format(outfile))
        with open(outfile, "w") as f:
            for concept in neighbours:
                if step in neighbours[concept]:
                    neighs = list(set(neighbours[concept][step]))
                    f.write("{} {}\n".format(concept, " ".join(neighs)))
    # write total
    outfile = "{}.neighbours.total.txt".format(semres.base_name, step + 1)
    info("Writing to {}".format(outfile))
    with open(outfile, "w") as f:
        for concept in neighbours:
            neighs = []
            for step in range(semres.spread_steps):
                if step in neighbours[concept]:
                    neighs.extend(neighbours[concept][step])
            neighs = list(set(neighs))
            f.write("{} {}\n".format(concept, " ".join(neighs)))
Example #2
    # We launch a Session to test the exported file
    with tf.Session(config=my_config, graph=graph) as sess:

        test_suite = TestClass(graph, sess, 'freeze_graph')
        dataset = test_suite.create_dataset(cfg, image_paths)
        test_suite.run(
            {"input_tensor": 'prefix/input/x:0', "is_train_tensor": "prefix/input/is_train:0",
             "mask_tensor": "prefix/output/y_pred:0", "score_tensor": "prefix/output/score:0"},
            dataset, OUTPUT_PATH
        )


if __name__ == '__main__':

    base_cfg = settings.Config()
    sm = SettingsManager.get(base_cfg.get_options(), base_cfg.__dict__)
    parser = argparse.ArgumentParser(description="Export script")
    parser.add_argument('--model_id', dest='model_id', help='model_id for reading saved checkpoint', required=True)
    parser.add_argument('--target', dest='target', choices=['freeze_graph', 'saved_model'],
                        default='freeze_graph',
                        help='target for export: freeze_graph for REST API, saved_model for TF-Serving')

    args = parser.parse_args()
    model_id = args.model_id
    target = args.target

    sm.update({'model_id': model_id})
    ckpt_dir = os.path.join(base_cfg.ck_dir, model_id)
    if not os.path.exists(ckpt_dir):
        # no checkpoint to export from (mirrors the existence check in the training script)
        print("Provided model {} does not exist".format(model_id))
        sys.exit(0)
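
# Sketch (an assumption, not shown in this fragment): loading the frozen graph so
# its tensors get the "prefix/" names referenced above. tf.import_graph_def with
# name="prefix" prepends that prefix to every imported node.
def load_frozen_graph(pb_path):
    with tf.gfile.GFile(pb_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="prefix")
    return graph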
Example #3
import sys
import sensor
import serial
import requests
import os
import eventlet
import settings

from flask import Flask
from flask_socketio import SocketIO
eventlet.monkey_patch()

app = Flask(__name__)
app.config['DEBUG'] = False

socketio = SocketIO(app)
config = settings.Config()

MAT_SERIAL_IDENTIFIER = "M"
LIGHT_SERIAL_IDENTIFIER = "L"
ZERO_CROSS_IDENTIFIER = "Z"

ON = 1
OFF = 0

MAT_TEMPERATURE_APPROACH_DELTA_LIMIT = 0.12
AMBIENT_TEMPERATURE_APPROACH_DELTA_LIMIT = 0.2

display = Display()
probe = Probe(config.get('probe', 'READ_DIRECTORY'), 'Probe')
dht1_temp = DHT22(gpio.DHT_SENSOR1_PIN, 1, 'Sensor1 (temperature)')
dht2_humidity = DHT22(gpio.DHT_SENSOR2_PIN, 2, 'Sensor2 (humidity)')
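
# Sketch (an assumption, not in the original fragment): a background task that
# periodically pushes readings to connected clients. The 'sensor_update' event
# name and the read() calls are hypothetical.
def broadcast_readings():
    while True:
        socketio.emit('sensor_update', {
            'probe': probe.read(),        # hypothetical Probe method
            'ambient': dht1_temp.read(),  # hypothetical DHT22 method
        })
        socketio.sleep(5)  # eventlet-friendly sleep

# socketio.start_background_task(broadcast_readings)
# socketio.run(app, host='0.0.0.0')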
Example #4
    return 0


def args_parser(args):
    args = dict(a.split("=") for a in args)
    assert "models" in args.keys()
    args["models"] = args["models"].split(",")
    if "rounds" in args.keys():
        args["rounds"] = int(args["rounds"])
    if "epochs" in args.keys():
        args["epochs"] = int(args["epochs"])
    return args
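
# Example (not from the original file) of how args_parser interprets key=value
# tokens from the command line:
#   args_parser(["models=resnet,unet", "rounds=10", "epochs=5"])
#   -> {"models": ["resnet", "unet"], "rounds": 10, "epochs": 5}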


if __name__ == "__main__":

    import os
    if not os.path.exists('./log'):
        os.mkdir('./log')
    if not os.path.exists('./models'):
        os.mkdir('./models')

    args = args_parser(sys.argv[1:])

    for m in args["models"]:
        config = settings.Config(rounds=args["rounds"],
                                 epochs=args["epochs"],
                                 arch=m,
                                 name=m)
        train(config=config)
Example #5
def main(argv):

    base_cfg = settings.Config()
    sm = SettingsManager.get(base_cfg.get_options(), base_cfg.__dict__)
    args_dict = sm.parse_cmd()
    sm.update(args_dict)

    # handle all the possible cases to start/resume/restart training
    if not args_dict['model_id']:
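        # build a fresh model_id from the run date plus a hash of the selected
        # settings, e.g. "20240131.<hash>"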
        model_id = datetime.now().strftime("%Y%m%d") + '.' + sm.get_hash(
            select_keys=base_cfg.get_hash_keys())
        sm.update({'model_id': model_id})
        ckpt_dir = os.path.join(base_cfg.ck_dir, model_id)

        if os.path.exists(ckpt_dir):
            # restart training
            if args_dict['restart']:
                logger.info(
                    "Delete old checkpoint dir {} and restart from scratch".
                    format(ckpt_dir))
                rm_dirs = [ckpt_dir, os.path.join(base_cfg.log_dir, model_id)]
                for rm_dir in rm_dirs:
                    try:
                        shutil.rmtree(rm_dir)
                    except OSError as err:
                        logger.info("OS error: {0}".format(err))
            else:
                # special case: happens when you run training with same default settings within same day.
                # Hash gives same model_id, so just resume what you had
                logger.info(
                    "Checkpoint dir {} already exists for model {}. Will try to resume if checkpoint exists"
                    .format(ckpt_dir, model_id))

        else:
            # start new training
            logger.info(
                "Start new training in checkpoint dir {}".format(ckpt_dir))
    else:
        model_id = args_dict['model_id']
        ckpt_dir = os.path.join(base_cfg.ck_dir, model_id)
        if not os.path.exists(ckpt_dir):
            logger.info(
                "Provided model {} does not exist. Can't resume/restart something that doesn't exist"
                .format(model_id))
            sys.exit(0)

        # resume training and ignore command line settings
        logger.info(
            "Resume from old checkpoint dir {} with stored parameters".format(
                ckpt_dir))
        with open(os.path.join(ckpt_dir, 'info.json')) as f:
            json_dict = json.load(f)
        stored_dict = merge_dicts(json_dict['params'], json_dict['dataset'])
        # given_dict = sm.__dict__()
        # # checking if cmd parameters are contained in stored parameters
        # if not dict_in_another(stored_dict, given_dict):
        #     logger.info("Stored settings don't match the command line settings. Using stored settings")
        sm.update(stored_dict)

    # create cfg class
    cfg = dict_to_obj(sm.__dict__())

    # create info dict
    info = sm.get_dict(base_cfg.get_common_keys())
    info.update({
        'params': sm.get_dict(base_cfg.get_params_keys()),
        'dataset': sm.get_dict(base_cfg.get_dataset_keys()),
        'metrics': {},
    })
    cfg.height = cfg.dim
    cfg.width = cfg.dim

    seg = Segmentation(cfg, info)

    if cfg.phase == 'train':
        try:
            seg.train()
        except Exception:
            # print the traceback, then drop into a post-mortem debugger
            tb = sys.exc_info()[2]
            traceback.print_exc()
            pdb.post_mortem(tb)

    if cfg.phase == 'test':
        seg.test()
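

# Minimal sketches (assumptions, not the project's actual helpers) of the
# merge_dicts and dict_to_obj utilities the script relies on.
def merge_dicts(*dicts):
    """Merge dicts left to right; later keys win."""
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged


def dict_to_obj(d):
    """Expose dict keys as attributes (cfg.dim, cfg.phase, ...)."""
    class _Cfg(object):
        pass
    cfg = _Cfg()
    for k, v in d.items():
        setattr(cfg, k, v)
    return cfg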