Example #1
    def __init__(self,
                 config_folder="config",
                 settings=configuration.Config(),
                 payload=payload.Payload()):

        super(ParametersWriter, self).__init__("parameters", config_folder,
                                               settings, payload)
Example #2
    def __init__(self,
                 config_folder="config",
                 settings=configuration.Config(),
                 payload=payload.Payload()):

        super(WeightsWriter, self).__init__("weights", config_folder, settings,
                                            payload)
Example #3
def begin():
    # Configuration - set the following values before executing. Notes:
    #  - bit_rate should be a divisor of the sample rate (44,100 Hz) to avoid
    #    errors due to rounding.
    #  - frequencies f0 and f1 must be at least twice the bit_rate.
    #    (see the validation sketch after this example)

    data_length = 400
    bit_rate = 2205
    f0 = 11025
    f1 = 8820
    easy_mode = True
    last_rec = True

    # ---------------------------------------------------------------------- #

    config = cf.Config(cf.BFSK, bit_rate, f0, f1)
    testcase_file = TESTCASE_FOLDER + testcase_name(config, data_length)

    # generate a new testcase file if one does not already exist
    if not os.path.isfile(testcase_file) or not os.path.isfile(testcase_file +
                                                               '.wav'):
        generate_testcase(data_length, config, audio=True)

    # analyze the generated audio
    wav, sr = io.input_from_file(testcase_file + '.wav')
    # print('plotting the generated audio file')
    # pt.plot_wav_analysis(wav, sr, [f0, f1], dft=True)

    run_testcase(testcase_file,
                 duration=0,
                 use_file=easy_mode,
                 use_last_recorded=last_rec)
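
The notes at the top of begin() are worth enforcing in code. Below is a minimal sketch of such a guard, assuming the 44,100 Hz sample rate the comment names; validate_bfsk is a hypothetical helper, not part of the original module.

SAMPLE_RATE = 44100  # assumed sample rate, per the comment in begin()

def validate_bfsk(bit_rate, f0, f1, sample_rate=SAMPLE_RATE):
    # bit_rate must divide the sample rate, or samples-per-bit rounds unevenly
    if sample_rate % bit_rate != 0:
        raise ValueError("bit_rate must be a divisor of %d Hz" % sample_rate)
    # both carrier frequencies must be at least twice the bit rate
    if min(f0, f1) < 2 * bit_rate:
        raise ValueError("f0 and f1 must be at least twice the bit_rate")

validate_bfsk(2205, 11025, 8820)  # the values used in begin() pass both checks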
Example #4
def main(_):
    assert FLAGS.train_dir, "--train_dir is required."
    if tf.gfile.Exists(FLAGS.summaries_dir):
        tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
    tf.gfile.MakeDirs(FLAGS.summaries_dir)

    config = configuration.Config()

    dataset_eval = loader.get_split(FLAGS.split_name,
                                    dataset_dir=FLAGS.data_dir)
    if FLAGS.preprocess_abs:
        preprocess_fn = tf.abs
    else:
        preprocess_fn = None

    # whether the model takes a 2D input
    is_2D = common.is_2D(FLAGS.model)

    series, labels, labels_one_hot = loader.load_batch(
        dataset_eval,
        batch_size=config.batch_size,
        is_2D=is_2D,
        preprocess_fn=preprocess_fn)

    # Build lazy model
    model = common.convert_name_to_instance(FLAGS.model, config, 'eval')

    endpoints = model.build(inputs=series, is_training=False)
    predictions = tf.to_int64(tf.argmax(endpoints.logits, 1))

    slim.get_or_create_global_step()

    # Choose the metrics to compute:
    names_to_values, names_to_updates = metrics.aggregate_metric_map({
        'accuracy':
        metrics.streaming_accuracy(predictions, labels),
        'precision':
        metrics.streaming_precision(predictions, labels),
        'recall':
        metrics.streaming_recall(predictions, labels),
    })

    # Create the summary ops such that they also print out to std output:
    summary_ops = []
    for metric_name, metric_value in names_to_values.items():
        op = tf.summary.scalar(metric_name, metric_value)
        op = tf.Print(op, [metric_value], metric_name)
        summary_ops.append(op)

    slim.evaluation.evaluation_loop(
        master='',
        checkpoint_dir=FLAGS.train_dir,
        logdir=FLAGS.summaries_dir,
        eval_op=list(names_to_updates.values()),
        num_evals=min(FLAGS.num_batches, dataset_eval.num_samples),
        eval_interval_secs=FLAGS.eval_interval_secs,
        max_number_of_evaluations=FLAGS.num_of_steps,
        summary_op=tf.summary.merge(summary_ops),
        session_config=config.session_config,
    )
Example #5
    def __init__(self,
                 config_folder="config",
                 settings=configuration.Config(),
                 payload=payload.Payload()):

        super(TopologyWriter, self).__init__("topology", config_folder,
                                             settings, payload)
Example #6
def main(args, context):

    logger = logging.getLogger(__name__)
    settings = configuration.Config()
    serializer = serialization.Serializer()

    input_data = communication.connect_socket(
        context,
        socket_type=zmq.PUB,
        connection=settings.connections["input_data"])

    logger.info("Starting data stream. Sending %d elements.", args.n)

    try:

        for i in range(args.n):

            logger.debug("Sending: %s", str(i))

            message = i
            message_buffer = serializer.write_buffer(message, topic="camera")
            input_data.send(message_buffer)

            time.sleep(args.s)

        logger.info("Data transfer complete.")

    except Exception as e:
        logger.exception("Failed sending camera data.")
        logger.info("Shutting down camera data stream.")
        sys.exit()

    return
Example #7
def main_dnn(_):
    #tensor_board_log_dir = '/tmp/tensorflow/traffic_dnn/logs'
    # tensorboard --logdir=/tmp/tensorflow/traffic_dnn/logs

    #if tf.gfile.Exists(tensor_board_log_dir):
    #    tf.gfile.DeleteRecursively(tensor_board_log_dir)
    #tf.gfile.MakeDirs(tensor_board_log_dir)

    config_file = '../conf-005es18066/traffic_dnn-05min.conf'
    config_file = '../conf-005es18066/traffic_dnn-05min-b1152.conf'
    #config_file = '../conf-005es18066/traffic_dnn-minute10-b288.conf'
    #config_file = '../conf-005es18066/traffic_dnn-minute15-b288.conf'
    #config_file = '../conf-005es18066/traffic_dnn-minute20-b288.conf'
    #config_file = '../conf-005es18066/traffic_dnn-minute30-b288.conf'
    #config_file = '../conf-005es18066/traffic_dnn-minute60-b288.conf'

    #config_file = '../conf/traffic_dnn-05min.conf'
    #config_file = '../conf/traffic_dnn-minute10.conf'
    #config_file = '../conf/traffic_dnn-minute15.conf'
    #config_file = '../conf/traffic_dnn-minute20.conf'
    #config_file = '../conf/traffic_dnn-minute30.conf'
    #config_file = '../conf/traffic_dnn-minute60.conf'

    #hidden_layer = [16, 18, 11, 21, 12, 6]      # minute 9.97%
    #hidden_layer = [60, 60, 60, 60, 60, 60]     # minute 9.76%
    #hidden_layer = [20, 20, 20, 20, 20, 20]     # 9.82%
    hidden_layer = [15, 18, 22, 9, 5]  # 7.80% 5min

    conf = cf.Config(config_file)
    train_with_config(conf, hidden_layer, True)
Example #8
File: cli.py Project: CLdelisle/SPH
    def __init__(self):
        self.config = configuration.Config()
        defaults = self.config.getArg('all')
        # Set argument options for CLI
        self.parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description="SPH.py is the user interface for "
                                "our physics simulation program.\n'SPH' stands for Smoothed Particle Hydrodynamics, which is an algorithm for "
                                "simulating fluid flows.\nYou can read more on it at https://en.wikipedia.org/wiki/Smoothed-particle_hydrodynamics")

        self.parser.add_argument("-g", "--gen", help="Number of particles to generate. Conflicts with [IFILE] argument", type=int)
        self.parser.add_argument("-i", "--ifile", help="Input file path to read particles from. Takes precedence over [GEN] argument")
        self.parser.add_argument("--gtype", help="Type of particle generation to perform. Default is "+defaults['gtype'], choices=['gaussian', 'random', 'disk'], default=defaults['gtype'])
        self.parser.add_argument("-s", "--savefile", help="Output file path to write particles to. Suffix is currently "+defaults['savefile'], default=defaults['savefile'])
        self.parser.add_argument("--bound", help="Sets boundaries of particle space. Default is "+defaults['bound'], type=int, default=int(defaults['bound']))
        self.parser.add_argument("--stdev", help="Standard deviation of particle space. Default is "+defaults['stdev'], type=float, default=float(defaults['stdev']))
        self.parser.add_argument("--maxiter", help="Maximum iterations to run the simulation through. Default is "+defaults['maxiter'], type=int, default=int(defaults['maxiter']))
        self.parser.add_argument("--timestep", help="The temporal resolution of the simulation. Default is "+defaults['timestep'], type=int, default=defaults['timestep'])
        self.parser.add_argument("--t_norm", help="Time normalization. Default is "+defaults['t_norm'], choices=['months', 'years', 'decades', 'centuries'], default=defaults['t_norm'])
        self.parser.add_argument("--x_norm", help="Space normalization. Default is "+defaults['x_norm'], choices=['m', 'km', 'ly'], default=defaults['x_norm'])
        self.parser.add_argument("--kernel", help="Kernel function to use. Default is "+defaults['kernel'], choices=['gaussian', 'cubic'], default=defaults['kernel'])
        self.parser.add_argument("--smooth", help="Smoothing for the kernel function. Default is "+defaults['smooth'], type=float, default=float(defaults['smooth']))
        self.parser.add_argument("--interval", help="How many loops before particles are saved. Default is "+defaults['interval'], type=int, default=int(defaults['interval']))
        self.parser.add_argument("--mass", help="Mass of the particles in the simulation. Default is "+defaults['mass'], type=float, default=float(defaults['mass']))
        self.parser.add_argument("--mode", help="Simulation mode (parallel or serial). Default is "+defaults['mode'], choices=['serial', 'parallel'], default=defaults['mode'])
        self.parser.add_argument("-v", "--verbosity", help="Level of detail when outputting particles. Default is "+defaults['verbosity'], choices=[1,2,3], type=int, default=defaults['verbosity'])
        # Actually begin to parse the arguments
        self.args = self.parser.parse_args()
Example #9
	def __init__(self, config=configuration.Config(), FPGA=fpga.FPGA(), serializer=serialization.Serializer()):

		self.context = zmq.Context()
		self.settings = config
		self.FPGA = FPGA
		self.serializer = serializer
		
		self.connections = self.settings.connections
		self.read_commands = self.settings.read_commands
		self.write_commands = self.settings.write_commands
		self.topics = self.settings.topics

		self.logger = logging.getLogger("controller")

		self.logger.debug("Initializing controller ...")
		self.logger.debug("Connecting sockets ...")

		self.commander = communication.bind_socket(self.context, socket_type = zmq.REP, connection = self.connections["controller"])
		self.input_data = communication.bind_socket(self.context, socket_type = zmq.SUB, connection = self.connections["camera"])
		self.output_data = communication.bind_socket(self.context, socket_type = zmq.PUB, connection = self.connections["monitor"])
		
		# input subscribes to any topic, i.e. these sockets read from all their connections at once
		self.input_data.setsockopt(zmq.SUBSCRIBE, "")

		self.logger.debug("Initializing poll sets ...")
		# Initialize poll set
		self.poller = zmq.Poller()
		self.poller.register(self.commander, zmq.POLLIN)
		self.poller.register(self.input_data, zmq.POLLIN)
		self.logger.debug("Initialization complete.")

		self.command = None
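
For context on the SUBSCRIBE option used above: in pyzmq, an empty subscription prefix matches every topic, so the socket receives everything its connected publishers send. A self-contained sketch (the endpoint address is illustrative only):

import zmq

context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.connect("tcp://127.0.0.1:5556")   # hypothetical endpoint
subscriber.setsockopt(zmq.SUBSCRIBE, b"")    # empty prefix: match every topic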
Example #10
def main(args, context):

    logger = logging.getLogger(__name__)
    settings = configuration.Config()
    serializer = serialization.Serializer()

    output_data = communication.connect_socket(
        context,
        socket_type=zmq.SUB,
        connection=settings.connections["output_data"])
    output_data.setsockopt(zmq.SUBSCRIBE, settings.topics["weights"])

    logger.info("Listening for data stream.")

    try:

        while True:

            message_buffer = output_data.recv()
            message = serializer.read_buffer(message_buffer, topic="weights")
            logger.info("Recieved: %s", message)
            time.sleep(args.s)

    except Exception as e:
        logger.exception("Listening failed.")
        logger.info("Shutting down monitor.")
        sys.exit()

    except KeyboardInterrupt as e:
        logger.debug("User interrupt.")
        logger.info("Shutting down monitor.")
        sys.exit()

    return
Example #11
def main():

    print("Using docker-py version %s" % docker.version)
    args, container_args = parseArguments()

    if args.socket:
        socket = args.socket
    else:
        socket = 'unix://home/docker/sockets/sandbox/docker.sock'
    # else:
    #     socket = 'unix://var/lib/docker/docker.sock'

    client = docker.APIClient(base_url=socket)
    config = configuration.Config(args, container_args, client)

    if args.debug:
        print("Run complete. Below is the complete config.")
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(config)

    for _, dir in config.get("appDirs").items():
        if not os.path.exists(dir):
            print("Making directory %s" % dir)
            os.makedirs(dir)
    if not os.path.exists(config.get("profileDir")):
        os.makedirs(config.get("profileDir"))
    if not os.path.exists(config.get("workDir")):
        os.makedirs(config.get("workDir"))

    print("Spawning new supervisor")
    app = supervisor.Supervisor(config, client)
    print("Running supervisor")
    app.run()
Example #12
def predict(texts):
    conf = configuration.Config()
    tokenizer = BertTokenizer.from_pretrained(conf.pretrained_model_name)
    model = BertForSequenceClassification.from_pretrained(conf.pretrained_model_name, num_labels=conf.num_labels)
    model = model.to(device)

    if os.path.exists(os.path.join(conf.model_dir, conf.model_name)):
        model.load_state_dict(torch.load(os.path.join(conf.model_dir,
                                                      conf.model_name)))
    else:
        logging.info(' *** No model available. *** ')
        return

    predict_dataset = datasets.OnlineShopping(mode='single_predict',
                                              config=conf,
                                              tokenizer=tokenizer,
                                              auto_padding=True,
                                              texts=texts)

    predict_dataloader = DataLoader(predict_dataset,
                                    batch_size=len(texts),
                                    collate_fn=collate_fn)
    data = next(iter(predict_dataloader))
    tokens_tensors, segments_tensors, masks_tensors, _ = [t.to(device) if t is not None else t for t in data]
    outputs = model(input_ids=tokens_tensors,
                    token_type_ids=segments_tensors,
                    attention_mask=masks_tensors)
    print(outputs)
    probs, predictions = get_predictions(outputs, compute_acc=False)
    return dict(zip(texts, [{'result': label, 'probability': prob} for label, prob in
                            zip([predict_dataset.convert_label_id_to_value(prediction.item()) for prediction in
                                 predictions],
                                [prob.item() for prob in probs])]))
Example #13
    def change_config_file(self):
        config_filename = QtGui.QFileDialog.getOpenFileName(
            self, "configuration file", "", "Text files (*.ini)")

        self.config = configuration.Config(config_filename)
        self.config.set_archdrawing()
        self.config.set_database()
        self.set_config_info()
Example #14
	def __init__(self, topic, sleep=0, connection="output_data", settings=configuration.Config(), serializer=serialization.Serializer()):

		super(Monitor, self).__init__(topic, sleep, connection, settings, serializer)

		self.logger = logging.getLogger(topic + " monitor")
		self.logger.setLevel(logging.DEBUG)

		self.logger.info("Starting to monitor: %s", self.topic)
Example #15
def single_translate(english_text):
    """ 只输入一句话,即batch_size==1 """
    conf = configuration.Config()
    tokenizer = tokenization.FullTokenizer(en_vocab_file=os.path.join(conf.file_config.data_path,
                                                                      conf.file_config.en_vocab),
                                           zh_vocab_file=os.path.join(conf.file_config.data_path,
                                                                      conf.file_config.zh_vocab))
    conf.model_config.src_vocab_size = tokenizer.get_en_vocab_size() + 2
    conf.model_config.trg_vocab_size = tokenizer.get_zh_vocab_size() + 2
    model = models.Transformer(conf)
    model.to(device)

    translate_dataset = datasets.Translate(mode='single_translate',
                                           config=conf,
                                           tokenizer=tokenizer,
                                           texts=english_text)
    # the encoder input is simply the raw English token ids
    en_ids, _ = translate_dataset[0]
    en_ids = torch.tensor(en_ids).unsqueeze(dim=0)
    # the decoder starts from <BOS>
    decoder_input = torch.tensor([tokenizer.get_zh_vocab_size()]).view(1, 1)   # [1, 1]

    model.load_state_dict(torch.load(os.path.join(conf.train_config.model_dir,
                                                  conf.train_config.model_name + '.pth'),
                                     map_location=device))
    # checkpoint = torch.load(os.path.join(conf.train_config.model_dir,
    #                                      conf.train_config.model_name + '_epoch_{}.tar'.format(50)),
    #                         map_location=device)
    # model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()

    for i in range(51):
        if torch.cuda.is_available():
            prediction_logits = model(en_ids.cuda(),
                                      decoder_input.cuda())        # [1, i+1, vocab_size]
        else:
            prediction_logits = model(en_ids,
                                      decoder_input)
        # take the final distribution and argmax it to get the newly predicted token
        predictions = prediction_logits[:, -1, :]              # [batch_size, vocab_size]
        predictions = F.softmax(predictions, dim=-1)
        predict_zh_ids = torch.argmax(predictions, dim=-1)      # [batch_size]

        # stop if the prediction is <EOS>
        if predict_zh_ids.data == tokenizer.get_zh_vocab_size() + 1:
            break
        # otherwise append the prediction to the previous output and loop again
        else:
            if torch.cuda.is_available():
                decoder_input = torch.cat([decoder_input.cuda(), predict_zh_ids.view(1, 1)], dim=1)
            else:
                decoder_input = torch.cat([decoder_input, predict_zh_ids.view(1, 1)], dim=1)

    # convert the generated Chinese ids back to Chinese text
    translated_text = tokenizer.convert_zh_ids_to_text(list(decoder_input.cpu().detach().numpy()[0])[1: 51])
    print('source:', english_text)
    print('translation:', translated_text)
    return translated_text
Example #16
def main():
    conf = configuration.Config()
    tokenizer = tokenization.FullTokenizer(en_vocab_file=os.path.join(conf.file_config.data_path,
                                                                      conf.file_config.en_vocab),
                                           zh_vocab_file=os.path.join(conf.file_config.data_path,
                                                                      conf.file_config.zh_vocab))
    logging.info('Using Device: {}'.format(device))
    conf.model_config.src_vocab_size = tokenizer.get_en_vocab_size() + 2
    conf.model_config.trg_vocab_size = tokenizer.get_zh_vocab_size() + 2
    model = models.Transformer(conf)
    model = model.to(device)

    if args.train:
        train_dataset = datasets.Translate(mode='train',
                                           config=conf,
                                           tokenizer=tokenizer,
                                           auto_padding=conf.train_config.auto_padding,
                                           do_filter=False)

        logging.info("***** Running training *****")
        logging.info("  Num examples = %d", len(train_dataset))
        logging.info("  Total training steps: {}".format(train_dataset.num_steps))

        train_dataloader = DataLoader(train_dataset,
                                      batch_size=conf.train_config.train_batch_size,
                                      shuffle=True,
                                      collate_fn=collate_fn)

        run(config=conf,
            dataloader=train_dataloader,
            model=model,
            mode='train',
            start_epoch=0,
            total_steps=train_dataset.num_steps)

    if args.eval:
        eval_dataset = datasets.Translate(mode='eval',
                                          config=conf,
                                          tokenizer=tokenizer,
                                          auto_padding=conf.train_config.auto_padding,
                                          do_filter=False)

        logging.info("***** Running validating *****")
        logging.info("  Num examples = %d", len(eval_dataset))
        logging.info("  Total validating steps: {}".format(eval_dataset.num_steps))

        eval_dataloader = DataLoader(eval_dataset,
                                     batch_size=conf.train_config.eval_batch_size,
                                     collate_fn=collate_fn)
        run(config=conf,
            dataloader=eval_dataloader,
            model=model,
            mode='eval',
            start_epoch=0,
            total_steps=eval_dataset.num_steps)
Example #17
	def __init__(self, topic, sleep=0, connection="output_data", settings=configuration.Config(), serializer=serialization.Serializer()):

		self.context = zmq.Context()
		self.settings = settings
		self.serializer = serializer
		self.sleep = sleep
		self.topic = topic

		self.logger_parent = logging.getLogger("topical monitor")
		self.logger_parent.setLevel(logging.INFO)

		self.data = communication.connect_socket(self.context, socket_type = zmq.SUB, connection = self.settings.connections[connection])
		self.data.setsockopt(zmq.SUBSCRIBE, self.settings.topics[topic])
Example #18
def train():
    conf = configuration.Config()
    tokenizer = tokenization.FullTokenizer(
        vocab_file=conf.file_config.vocab_file)

    model = models.TransformerEncoder(conf)
    model = model.to(device)

    if args.train:
        train_dataset = datasets.OnlineShopping(
            mode='train',
            config=conf,
            tokenizer=tokenizer,
            auto_padding=conf.train_config.auto_padding)

        logging.info("***** Running training *****")
        logging.info("  Num examples = %d", len(train_dataset))
        logging.info("  Total training steps: {}".format(
            train_dataset.num_steps))

        train_dataloader = DataLoader(
            train_dataset,
            batch_size=conf.train_config.train_batch_size,
            shuffle=True,
            collate_fn=collate_fn)

        run(config=conf,
            dataloader=train_dataloader,
            model=model,
            mode='train',
            total_steps=train_dataset.num_steps)

    if args.dev:
        dev_dataset = datasets.OnlineShopping(
            mode='dev',
            config=conf,
            tokenizer=tokenizer,
            auto_padding=conf.train_config.auto_padding)

        logging.info("***** Running validating *****")
        logging.info("  Num examples = %d", len(dev_dataset))
        logging.info("  Total validating steps: {}".format(
            dev_dataset.num_steps))

        dev_dataloader = DataLoader(
            dev_dataset,
            batch_size=conf.train_config.train_batch_size,
            collate_fn=collate_fn)

        run(config=conf, dataloader=dev_dataloader, model=model, mode='eval')
Example #19
def train():
    conf = configuration.Config()
    tokenizer = BertTokenizer.from_pretrained(conf.pretrained_model_name)

    # load the pretrained BERT model, specifying the cache folder path
    pretrained_model = os.path.join(conf.pretrained_model_path, conf.pretrained_model_name)
    if not os.path.exists(pretrained_model):
        os.mkdir(pretrained_model)
        model = BertForSequenceClassification.from_pretrained(conf.pretrained_model_name, num_labels=conf.num_labels,
                                                              cache_dir=os.path.join(pretrained_model, './cache'))
        model.save_pretrained(pretrained_model)
    else:
        model = BertForSequenceClassification.from_pretrained(pretrained_model, num_labels=conf.num_labels)
    model = model.to(device)

    if args.train:
        model.train()
        train_dataset = datasets.OnlineShopping(mode='train',
                                                config=conf,
                                                tokenizer=tokenizer,
                                                auto_padding=conf.auto_padding)

        logging.info("***** Running training *****")
        logging.info("  Num examples = %d", len(train_dataset))
        logging.info("  Total training steps: {}".format(train_dataset.num_steps))

        train_dataloader = DataLoader(train_dataset,
                                      batch_size=conf.train_batch_size,
                                      shuffle=True,
                                      collate_fn=collate_fn)

        run(config=conf, dataloader=train_dataloader, model=model, mode='train')

    if args.dev:
        model.eval()
        dev_dataset = datasets.OnlineShopping(mode='dev',
                                              config=conf,
                                              tokenizer=tokenizer,
                                              auto_padding=conf.auto_padding)

        logging.info("***** Running training *****")
        logging.info("  Num examples = %d", len(dev_dataset))
        logging.info("  Total training steps: {}".format(dev_dataset.num_steps))

        dev_dataloader = DataLoader(dev_dataset,
                                    batch_size=conf.dev_batch_size,
                                    shuffle=True,
                                    collate_fn=collate_fn)

        run(config=conf, dataloader=dev_dataloader, model=model, mode='eval')
Example #20
	def __init__(self, topic, config_folder="config", settings=configuration.Config(), payload=payload.Payload()):

		self.settings = settings
		self.payload = payload
		self.topic = topic
		self.config_folder = config_folder
		self.path = os.path.join(self.config_folder, self.topic)

		self.write_topics = ["weights", "parameters", "topology"]

		# constants and variables that all the parameter writers need go here

		if topic not in self.write_topics:
			print("Selected topic not implemented for writing. Aborting.")
			sys.exit()
Example #21
	def __init__(self, topic, path, sleep=0, connection="output_data", settings=configuration.Config(), serializer=serialization.Serializer()):

		super(FileLogger, self).__init__(topic, sleep, connection, settings, serializer)

		self.logger = logging.getLogger(topic + " logger")
		self.logger.setLevel(logging.DEBUG)

		self.folder = os.path.join(path, topic)
		self.log_file = os.path.join(self.folder, topic+"_store"+".txt")

		self.prepare_save(self.folder)

		self.store = open(self.log_file, "w")

		self.logger.info("Starting to log %s to file: %s", self.topic, self.log_file)
Example #22
def extract_scripts(filename, output_folder):
    output = ""

    print(filename)

    fin = open(os.path.join(configuration.Config().path, filename), "rb+")
    fin = (BinaryIO.reader(fin.read())).adapter(fin)

    output = dumb_scripts(fin, 'Script')
    head, tail = os.path.split(filename)
    with open(os.path.join(output_folder, tail.replace(".bin", ".s")),
              'w') as out:
        out.write(output)

    return output
Example #23
    def __init__(self,
                 config=configuration.Config(),
                 serializer=serialization.Serializer(),
                 payload=payload.Payload()):

        self.context = zmq.Context()
        self.settings = config
        self.serializer = serializer
        self.payload = payload

        self.controller = communication.connect_socket(
            self.context,
            socket_type=zmq.REQ,
            connection=self.settings.connections["commander"])

        self.logger = logging.getLogger("commander")
Example #24
def main():

    config = configuration.Config()

    connection = database.Connection(
        config.get_database_username(),
        config.get_database_password(),
        config.get_database_host(),
        config.get_database_name()
    )

    robot = bot.SoSayWeAllBot(config.get_reddit_password(), connection)

    robot.run_loop()

    connection.close()
Example #25
    def __init__(self, logs_dir, interface):
        # Definition variables
        self.plugin_folder = "%s\\plugins\\server_exemple\\"%(os.path.dirname(os.path.abspath(__file__)))
        self.config_file = '%sconfig.txt'%(self.plugin_folder)
        # Start the logging services
        self.Log_ex = log.Log("Server Exemple", logs_dir)
        self.Log_ex.append("Plugin loaded", "info")
        print("[server_exemple]start Plugin")

        self.configuration = configuration.Config("%s\\plugins\\"%(os.path.dirname(os.path.abspath(__file__))), "server_exemple")
        self.interface = interface

        # Check the config service
        # Verify the folder
        if not self.configuration.read_value():
            self.start_pl = False
            self.Log_ex.append("No config file : plugin will not start.", "warn")
            self.configuration.append("server_name: exemple")
            self.configuration.append("ip: 0.0.0.0")
            self.configuration.append("port: 1234")
            self.configuration.append("pass: 0123456789")
        # Create values in the config file if missing
        if "server_name" not in self.configuration.read_value():
            self.start_pl = False
            self.configuration.append("server_name: exemple")
            self.Log_ex.append("Config file found but argument missing, creating.", "warn")
        if "ip" not in self.configuration.read_value():
            self.start_pl = False
            self.configuration.append("ip: 0.0.0.0")
            self.Log_ex.append("Config file found but argument missing, creating.", "warn")
        if "port" not in self.configuration.read_value():
            self.start_pl = False
            self.configuration.append("port: 1234")
            self.Log_ex.append("Config file found but argument missing, creating.", "warn")
        if "pass" not in self.configuration.read_value():
            self.start_pl = False
            self.configuration.append("pass: 0123456789")
            self.Log_ex.append("Config file found but argument missing, creating.", "warn")
        # Otherwise we start
        else:
            self.start_pl = True
            self.ip_port = "%s:%s"%(self.configuration.read_value()['ip'], self.configuration.read_value()['port'])
            print("[server_exemple] Config file, plugin launch")
            print("[server_exemple] Start the server on : %s"%(self.ip_port))
            self.Log_ex.append("Config file found", "info")

        schedule.every().minute.do(self.job_server_exemple)
Example #26
def read_testcase(file_path):
    """
    Read the modem configuration and data from a testcase file.

    :param file_path: Path of the testcase text file.
    :return: A tuple (config, data).
    """
    print('reading testcase \'{}\'...'.format(file_path))
    with open(file_path, 'r') as f:
        mode = f.readline().strip()
        if mode == cf.BFSK:
            br = int(f.readline().strip())
            f0 = int(f.readline().strip())
            f1 = int(f.readline().strip())
            data = f.readline().strip()
            return cf.Config(mode, br, f0, f1), data
        else:
            raise NotImplementedError('unsupported modulation/encoding scheme')
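
A writer for the same layout makes the file format in the docstring concrete. This is a hedged sketch; write_testcase is a hypothetical helper that mirrors the line order read_testcase() parses for BFSK.

def write_testcase(file_path, mode, bit_rate, f0, f1, data):
    # one value per line, in the order read_testcase() expects:
    # mode, bit rate, f0, f1, then the data bits
    with open(file_path, 'w') as f:
        for value in (mode, bit_rate, f0, f1, data):
            f.write('{}\n'.format(value))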
Example #27
def predict(texts):
    conf = configuration.Config()
    tokenizer = tokenization.FullTokenizer(
        vocab_file=conf.file_config.vocab_file)

    model = models.TransformerEncoder(conf)
    model = model.to(device)

    if os.path.exists(
            os.path.join(conf.train_config.model_dir,
                         conf.train_config.model_name)):
        logging.info(' *** Loading model ***')
        model.load_state_dict(
            torch.load(
                os.path.join(conf.train_config.model_dir,
                             conf.train_config.model_name)))
    else:
        logging.info(' *** No model available. *** ')
        return

    predict_dataset = datasets.OnlineShopping(mode='single_predict',
                                              config=conf,
                                              tokenizer=tokenizer,
                                              auto_padding=True,
                                              texts=texts)
    predict_dataloader = DataLoader(predict_dataset,
                                    batch_size=len(predict_dataset),
                                    collate_fn=collate_fn)
    data = next(iter(predict_dataloader))
    text_ids, _ = [t.to(device) if t is not None else t for t in data]
    logits = model(text_ids)
    probs, predictions = get_predictions(logits)

    return dict(
        zip(texts, [{
            'result': label,
            'probability': prob
        } for label, prob in zip([
            predict_dataset.convert_label_id_to_value(prediction.item())
            for prediction in predictions
        ], [prob.item() for prob in probs])]))
Example #28
def main_dnn_evaluate(_):
    config_file = '../conf-005es18066/traffic_dnn-05min.conf'
    config_file = '../conf-005es18066/traffic_dnn-05min-b1152.conf'
    # config_file = '../conf-005es18066/traffic_dnn-minute10-b288.conf'
    # config_file = '../conf-005es18066/traffic_dnn-minute15-b288.conf'
    # config_file = '../conf-005es18066/traffic_dnn-minute20-b288.conf'
    # config_file = '../conf-005es18066/traffic_dnn-minute30-b288.conf'
    # config_file = '../conf-005es18066/traffic_dnn-minute60-b288.conf'

    # config_file = '../conf/traffic_dnn-05min.conf'
    # config_file = '../conf/traffic_dnn-minute10.conf'
    # config_file = '../conf/traffic_dnn-minute15.conf'
    # config_file = '../conf/traffic_dnn-minute20.conf'
    # config_file = '../conf/traffic_dnn-minute30.conf'
    # config_file = '../conf/traffic_dnn-minute60.conf'

    config = cf.Config(config_file)

    # 'model-005es18115-minute05-15182295'
    #config.model_name = 'model-005es18066-minute05'
    model_name_postfix = '-15182295'
    evaluate_with_config(config, model_name_postfix, True)
Example #29
def try_to_train(train_fn, try_block=True, overwrite=False, **kargs):
    """Wrapper for the main training function."""
    config = conf.Config(**kargs)
    config.overwrite_safety_check(overwrite)
    if config.resume_training:
        print('INFO: Resuming training from checkpoint.')
        fp = os.path.join(config.log_path, 'config.pkl')
        config = conf.load_config(fp)
        config.resume_training = True
        config.checkpoint_path = kargs.pop('log_path')
        config.lr_end = kargs.pop('lr_end')
        config.max_epoch = kargs.pop('max_epoch')
    else:
        config.save_config_to_file()
    if try_block:
        try:
            train_fn(config)
        except KeyboardInterrupt:
            raise
        except Exception:
            error_log = sys.exc_info()
            traceback_extract = tb.format_list(tb.extract_tb(error_log[2]))
            if not os.path.exists(config.log_path):
                os.makedirs(config.log_path)
            err_msg = 'Error occurred:\r\n\r\n%s\r\n' % str(error_log[0])
            err_msg += '%s\r\n%s\r\n\r\n' % (str(
                error_log[1]), str(error_log[2]))
            err_msg += '\r\n\r\nTraceback stack:\r\n\r\n'
            for entry in traceback_extract:
                err_msg += '%s\r\n' % str(entry)
            name = 'error__' + os.path.split(config.log_path)[1] + '.txt'
            with open(os.path.join(os.path.dirname(config.log_path), name),
                      'w') as f:
                f.write(err_msg)
            print('\nWARNING: An error has occurred.\n')
            print(err_msg)
            #tf.reset_default_graph()
    else:
        train_fn(config)
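
A hedged call-site sketch for try_to_train(): the keyword names log_path, lr_end, and max_epoch come from the kargs.pop calls above, while dummy_train and the values are placeholders.

def dummy_train(config):
    # stands in for the real training loop passed as train_fn
    print('training run logged to', config.log_path)

try_to_train(dummy_train,
             try_block=True,
             overwrite=False,
             log_path='./logs/run1',
             lr_end=1e-5,
             max_epoch=30)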
Example #30
def check_run(basecalls_dir, working_dir, toolVersion):

    print('Sequencing QC version is "' + VERSION + '"')

    # Check if the basecalls dir exists
    ok, msg = sanity(basecalls_dir)
    if not ok:
        return False, msg  # no data to analyze

    # Get sample sheet information
    config = configuration.Config(basecalls_dir)
    if len(config.sampleInfoDictionary) == 0:
        return False, "No Sample Sheet"  # no info about the data

    # Clean up any old stats files
    stats_file = os.path.join(working_dir, config.runID, config.runID + "_stats.csv")
    if os.path.exists(stats_file):
        os.remove(stats_file)

    # Plug in new instruments here
    if config.instrument_type == "miseq":
        runQualityInfo = MiSeqQuality.MiSeqRunQualityInfo(config)
    # elif instrument_type == "hiseq":

    runQualityInfo.writeReport2(config, toolVersion, working_dir)

    return runQualityInfo.OK()  # True if the data are usable