Example #1
def main():

    parse_arguments()

    # do my initialization, before starting the server
    server_init()
    
    srv = SSLWSGIRefServer(host=global_args.listen_host,
                           port=global_args.listen_port)
    run(server=srv)    
    return
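The SSLWSGIRefServer class is not defined in this example. A minimal sketch of such an adapter, assuming it follows the widely circulated bottle ServerAdapter recipe (the certificate path 'server.pem' is a placeholder, not taken from the example above):

import ssl
from wsgiref.simple_server import make_server
from bottle import ServerAdapter

class SSLWSGIRefServer(ServerAdapter):
    def run(self, handler):
        srv = make_server(self.host, self.port, handler)
        # wrap the listening socket with TLS before serving
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        ctx.load_cert_chain('server.pem')  # placeholder certificate path
        srv.socket = ctx.wrap_socket(srv.socket, server_side=True)
        srv.serve_forever()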
Example #3
    def test_parse_arguments(self):
        self.assertEqual({'mode': 'rn_life_is_rubbish'},
                         arguments.parse_arguments("?mode=rn_life_is_rubbish"))
        self.assertEqual({
            'a': '1',
            'b': '2'
        }, arguments.parse_arguments("?a=1&b=2"))
        self.assertEqual(
            {'url': 'white space'},
            arguments.parse_arguments("?url=white+space")
        )  # urllib.quote_plus encodes ' ' as '+', amongst other things
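These assertions pin down the expected behaviour of this project's arguments.parse_arguments. A sketch of an implementation that would satisfy them (not the project's actual code):

from urllib.parse import parse_qsl

def parse_arguments(query_string):
    # strip the leading '?', then decode key=value pairs;
    # parse_qsl already turns '+' back into spaces
    return dict(parse_qsl(query_string.lstrip('?')))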
Example #4
    def test_fit_list_and_predict_single_label(self):
        """Train and test model while training data has single label.
    Training data are not concatenated.
    """
        model_args, training_args, inference_args = arguments.parse_arguments()
        model_args.rnn_depth = 1
        model_args.rnn_hidden_size = 8
        model_args.observation_dim = 16
        training_args.learning_rate = 0.01
        training_args.train_iteration = 50
        inference_args.test_iteration = 1

        # generate fake training data, as a list
        train_sequences = [
            np.random.rand(100, model_args.observation_dim),
            np.random.rand(200, model_args.observation_dim),
            np.random.rand(300, model_args.observation_dim)
        ]
        train_cluster_ids = [
            np.array(['A'] * 100),
            np.array(['A'] * 200),
            np.array(['A'] * 300),
        ]

        model = UISRNN(model_args)

        # training
        model.fit(train_sequences, train_cluster_ids, training_args)

        # testing, where data has less variation than training
        test_sequence = np.random.rand(10, model_args.observation_dim) / 10.0
        predicted_label = model.predict(test_sequence, inference_args)
        self.assertListEqual([0] * 10, predicted_label)
Example #5
def main():  # pragma: no cover
    logger.info('Starting docker-pygen ...')

    kwargs = parse_arguments().__dict__

    if kwargs.get('debug'):
        set_log_level('DEBUG')

    logger.debug('Startup arguments: %s',
                 ', '.join('%s=%s' % item for item in kwargs.items()))

    app = PyGen(**kwargs)

    setup_signals(app)

    logger.debug('Signal handlers set up for SIGTERM, SIGINT and SIGHUP')

    try:
        app.update_target()

        logger.debug('Starting event watch loop')

        app.watch()

    finally:
        logger.info('Exiting...')

        app.stop()
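setup_signals is docker-pygen internal code and is not shown here. A plausible sketch, assuming the PyGen app exposes the stop() and update_target() methods used above; the SIGHUP-triggers-regeneration behaviour is an assumption, and SIGHUP is Unix-only:

import signal

def setup_signals(app):
    def handle_exit(signum, frame):
        # unwind into the finally block above, which calls app.stop()
        raise SystemExit

    signal.signal(signal.SIGTERM, handle_exit)
    signal.signal(signal.SIGINT, handle_exit)
    # regenerate targets when the process is asked to reload
    signal.signal(signal.SIGHUP, lambda signum, frame: app.update_target())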
Example #6
def main():
    arguments = parse_arguments()
    normailze_inputs(arguments)

    first_chanel = Channel(mu1)
    second_chanel = Channel(mu2)
    generator = Generator(lmbd)
    ticks_number = 100000 * accuracy
    declined_claims = 0
    generated_claims = 0
    processed_claims = 0
    for i in range(0, ticks_number):
        if generator.is_generated():
            generator.start_generate()
            generated_claims += 1
            if first_chanel.is_processed():
                first_chanel.add()
                processed_claims += 1
            elif second_chanel.is_processed():
                second_chanel.add()
                processed_claims += 1
            else:
                declined_claims += 1
        first_chanel.tick()
        second_chanel.tick()
        generator.tick()

    print('Occupancy of first channel: ', first_chanel.work_time / ticks_number)
    print('Occupancy of second channel: ',
          second_chanel.work_time / ticks_number)
    print('Decline probability: ', declined_claims / generated_claims)
    print('Relative system capacity: ', processed_claims / generated_claims)
    print('Absolute system capacity: ',
          processed_claims * normalizing_factor / ticks_number)
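The Channel and Generator classes come from the surrounding project. A sketch of a Channel consistent with how it is used above; the exponential service-time model is an assumption:

import random

class Channel:
    def __init__(self, mu):
        self.mu = mu            # service rate
        self.busy_ticks = 0     # ticks left on the claim being served
        self.work_time = 0      # total busy ticks, used for occupancy stats

    def is_processed(self):
        # the channel can accept a new claim when it is idle
        return self.busy_ticks == 0

    def add(self):
        # draw an exponential service time, in ticks
        self.busy_ticks = max(1, round(random.expovariate(self.mu)))

    def tick(self):
        if self.busy_ticks > 0:
            self.busy_ticks -= 1
            self.work_time += 1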
Example #7
def main(argv=sys.argv):
    """
    main function
    :param argv:    Incoming parameters
    :return:
    """
    # parse and load parameters
    parameters = load_parameters('parameters.json')
    arguments = parse_arguments(argv[1:])
    parameters = utils.parse_params(arguments, parameters)
    utils.print_parametes('parameters', parameters)
    # get model parameters
    model_parametes = get_model_parametes(parameters)
    # log file
    log_file = os.path.join(parameters["output_dir"], "log_%d" % time.time())
    log_f = utils.get_log_f(log_file)
    # data generator
    data_generator = DataGenerator(parameters)
    # create, train and infer model
    with tf.Session() as sess:
        model = ExampleModel(model_parametes)
        trainer = ExampleTrainer(sess, model, data_generator, parameters,
                                 log_f)
        trainer.train()

        # inference
        inference = ExampleInference(sess, model, data_generator, parameters,
                                     log_f)
        inference.infer()
Example #8
    def test_fit_concatenated_and_predict_single_label(self):
        """Train and test model while training data has single label.
    Training data have already been concatenated.
    """
        model_args, training_args, inference_args = arguments.parse_arguments()
        model_args.rnn_depth = 1
        model_args.rnn_hidden_size = 8
        model_args.observation_dim = 16
        training_args.learning_rate = 0.01
        training_args.train_iteration = 50
        inference_args.test_iteration = 1

        # generate fake training data, assume already concatenated
        train_sequence = np.random.rand(1000, model_args.observation_dim)
        train_cluster_id = np.array(['A'] * 1000)

        model = UISRNN(model_args)

        # training
        model.fit(train_sequence, train_cluster_id, training_args)

        # testing, where data has less variation than training
        test_sequence = np.random.rand(10, model_args.observation_dim) / 10.0
        predicted_label = model.predict(test_sequence, inference_args)
        self.assertListEqual([0] * 10, predicted_label)

        # testing on two sequences
        test_sequence1 = np.random.rand(10, model_args.observation_dim) / 10.0
        test_sequence2 = np.random.rand(10, model_args.observation_dim) / 10.0
        predicted_cluster_ids = model.predict([test_sequence1, test_sequence2],
                                              inference_args)
        self.assertIsInstance(predicted_cluster_ids, list)
        self.assertEqual(2, len(predicted_cluster_ids))
        self.assertListEqual([0] * 10, predicted_cluster_ids[0])
        self.assertListEqual([0] * 10, predicted_cluster_ids[1])
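Several examples here (4, 8, 9, 14, 24, 28) are tests from the uis-rnn speaker-diarization library. They assume a header roughly like the following; treat the exact import paths as assumptions rather than the repository's verbatim code:

import tempfile
import unittest

import numpy as np

from uisrnn import arguments          # provides parse_arguments()
from uisrnn.uisrnn import UISRNN      # the online speaker-diarization model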
Example #9
    def test_save_and_load(self):
        """Save model and load it."""
        model_args, _, _ = arguments.parse_arguments()
        model_args.observation_dim = 16
        model_args.transition_bias = 0.5
        model_args.sigma2 = 0.05
        model = UISRNN(model_args)
        temp_file_path = tempfile.mktemp()
        model.save(temp_file_path)
        model.load(temp_file_path)
        self.assertEqual(0.5, model.transition_bias)
Example #10
def run_pargenes(args):
    print_header(args)
    start = time.time()
    ret = 0
    try:
        ret = main_raxml_runner(args, arguments.parse_arguments(args))
    except Exception as inst:
        logger.info("[Error] " + str(type(inst)) + " " + str(inst))
        sys.exit(1)
    end = time.time()
    logger.timed_log(start, "END OF THE RUN OF " + os.path.basename(__file__))
    if (ret != 0):
        logger.info("Something went wrong, please check the logs")
Example #11
def main():

    args = parse_arguments()

    #print('>>>>>>>>>>>SLURM_NODELIST', os.environ['SLURM_NODELIST'])
    #print('>>>>>>>>>>>SLURM_STEP_NODELIST', os.environ['SLURM_STEP_NODELIST'])
    #print('>>>>>>>>>>>SLURM_NODEID', os.environ['SLURM_NODEID'])
    #print('>>>>>>>>>>>SLURM_PROCID', os.environ['SLURM_PROCID'])
    #print('>>>>>>>>>>>SLURM_LOCALID', os.environ['SLURM_LOCALID'])

    print('===>>>', 'Starting distributed...')
    # start threads for each of chosen gpus within one node
    mp.spawn(distributed_node, nprocs=len(args.gpus), args=(args, ))
    print('===>>>', 'Distributed finished.')
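mp.spawn calls its target with the process index as the first argument, followed by everything in args. A minimal sketch of what distributed_node might look like; the backend and init_method are assumptions:

import torch
import torch.distributed as dist

def distributed_node(local_rank, args):
    # one process per selected GPU within this node
    dist.init_process_group(backend='nccl',
                            init_method='tcp://127.0.0.1:23456',
                            world_size=len(args.gpus),
                            rank=local_rank)
    torch.cuda.set_device(args.gpus[local_rank])
    # ... build the model and run the training loop here ...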
Example #12
def main():

    args = arguments.parse_arguments()

    if args["debug"] is True:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
        logging.getLogger("requests").setLevel(logging.WARNING)

    logging.basicConfig(
        level=log_level, format="%(asctime)s %(name)s %(levelname)s %(message)s", handlers=[logging.StreamHandler()]
    )

    logging.debug("container: %s, limit: %s, threads: %s", args["container"], args["limit"], args["threads"])

    if (
        variables.os_tenant_name is None
        or variables.os_username is None
        or variables.os_password is None
        or variables.os_auth_url is None
    ):
        print "The OS_TENANT_NAME, OS_USERNAME, OS_PASSWORD, OS_AUTH_URL environment " "variables must be set!"
        return

    try:
        threads = []

        # Create the requested number of threads
        for i in range(int(args["threads"])):
            t = threading.Thread(target=worker, args=(args["container"], args["limit"], i))
            t.daemon = True
            threads.append(t)
            t.start()
            time.sleep(1)

        # Keep looping until all of the threads have died
        while True:
            need_return = True

            for thread in threads:
                if thread.is_alive():
                    need_return = False

            if need_return is True:
                return
    except KeyboardInterrupt:
        return
Example #13
def run_pargenes(args):
    start = time.time()
    ret = 0
    try:
        op = arguments.parse_arguments(args)
    except Exception as inst:
        logger.info("[Error] " + str(type(inst)) + " " + str(inst))
        sys.exit(1)
    try:
        ret = main_raxml_runner(args, op)
    except Exception as inst:
        logger.info("[Error] " + str(type(inst)) + " " + str(inst))
        report.report_and_exit(op.output_dir, 1)
    end = time.time()
    logger.timed_log("END OF THE RUN OF " + os.path.basename(__file__))
    if (ret != 0):
        logger.info("Something went wrong, please check the logs")
Example #14
    def test_fit_with_wrong_dim(self):
        """Training data has wrong dimension."""
        model_args, training_args, _ = arguments.parse_arguments()
        model_args.rnn_depth = 1
        model_args.rnn_hidden_size = 8
        model_args.observation_dim = 16
        training_args.learning_rate = 0.01
        training_args.train_iteration = 5

        # generate fake data
        train_sequence = np.random.rand(1000, 18)
        train_cluster_id = np.array(['A'] * 1000)

        model = UISRNN(model_args)

        # training
        with self.assertRaises(ValueError):
            model.fit(train_sequence, train_cluster_id, training_args)
Example #15
async def main():
    config = parse_arguments()

    setup_logger(config)

    username = config.username
    token = config.token
    user = await github.get_user(username, token)

    repos = await github.get_repositories(user, token)
    repos = [repo for inner in repos for repo in inner]

    if config.connection == ConnectionType.SSH:
        urls = [repo['ssh_url'] for repo in repos]
    else:
        urls = [repo['clone_url'] for repo in repos]

    working_directory = os.getcwd()
    current_directories = os.listdir(path=working_directory)
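The example is cut off here. A hedged guess at the continuation: clone every repository that is not already checked out. This helper is an illustration, not the original code:

import asyncio

async def clone_missing(urls, current_directories, working_directory):
    for url in urls:
        name = url.rstrip('/').split('/')[-1]
        if name.endswith('.git'):
            name = name[:-len('.git')]
        if name not in current_directories:
            proc = await asyncio.create_subprocess_exec(
                'git', 'clone', url, cwd=working_directory)
            await proc.wait()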
Example #16
def main(args=None):
    # use system arguments if args is not specified
    args = args or sys.argv

    # create default configparser
    config = create_default_config()

    # parse the arguments; `args` holds the non-option arguments,
    # which will be passed to the external program being called
    args, opts = parse_arguments(args, config)

    if opts.setup:
        # run setup wizard
        from wizard import setup_wizard
        setup_wizard(config)
    elif opts.check:
        # run check
        from conf import get_user_config_filename
        call_and_notificate(['cat', get_user_config_filename()], opts)
    elif len(args) > 0:
        # call and notify user
        call_and_notificate(args, opts)
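call_and_notificate (the misspelling is the project's own) is defined elsewhere. A rough sketch of what it plausibly does, run the command and announce when it finishes; everything beyond subprocess is illustrative:

import subprocess

def call_and_notificate(args, opts):
    # run the external program and wait for it
    exit_code = subprocess.call(args)
    # a real implementation would raise a desktop notification here
    print('notification: %r exited with code %d' % (args, exit_code))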
Example #17
def main():
  model_args, training_args, _ = arguments.parse_arguments()
  train(model_args, training_args)
Example #18
            output, attention = model.decoder(tgt_tensor, enc_src, tgt_mask,
                                              src_mask)

        pred_token = output.argmax(2)[:, -1].item()
        tgt_indexes.append(pred_token)

        if pred_token == tgt_field.vocab.stoi[tgt_field.eos_token]:
            break
    tgt_tokens = [tgt_field.vocab.itos[i] for i in tgt_indexes]

    return tgt_tokens[1:], attention


if __name__ == "__main__":

    args = parse_arguments()
    hparams_file = args.config
    hparams = None
    with open(hparams_file) as fin:
        hparams = yaml.load(fin, Loader=yaml.FullLoader)
    assert hparams is not None, "args.config: {} not found".format(args.config)

    device = torch.device("cuda:0")

    encoder = Encoder(hparams["input_dim"], hparams["hid_dim"],
                      hparams["encoder_layers"], hparams["encoder_heads"],
                      hparams["encoder_pf_dim"], hparams["encoder_dropout"],
                      device)
    logging.debug("encoder: {}".format(encoder))

    decoder = Decoder(hparams["output_dim"], hparams["hid_dim"],
"""
Module containing the main code to setup and run the simulation
"""

from arguments import parse_arguments
import gal_sim
import uni_sim

if __name__ == "__main__":
    ARGS = parse_arguments()

    if ARGS.simulation == "g":
        gal_sim.run(ARGS)

    if ARGS.simulation == "u":
        uni_sim.run(ARGS)
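A parse_arguments consistent with the dispatch above, where 'g' and 'u' presumably select the galactic and universe simulations; the flag layout is an assumption:

import argparse

def parse_arguments():
    parser = argparse.ArgumentParser(
        description='Set up and run the simulation')
    parser.add_argument('simulation', choices=['g', 'u'],
                        help="'g' runs gal_sim, 'u' runs uni_sim")
    return parser.parse_args()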
Example #20
import sys
import time
from tqdm import tqdm
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states
from arguments import parse_arguments
from qap.input_file_reader import InputFileReader
from qap.solution_file_reader import SolutionFileReader
from qap.qap import QAP
from em_discrete.em_discrete import generate_permutations
from em_discrete.em_discrete_cuda import em_discrete
from plot import plot_results

if __name__ == '__main__':
    input_file, solution_file, permutations_count, iterations, hamming_distance = parse_arguments(
        sys.argv[1:])

    input_reader = InputFileReader(input_file)
    dimension, weights, distances = input_reader.read()

    qap = QAP(weights, distances)

    optimal_value = None
    optimal_permutation = None

    if solution_file is not None:
        solution_reader = SolutionFileReader(solution_file)
        solution_dimension, solution_value, solution_permutation = \
            solution_reader.read()

        if solution_dimension != dimension:
Example #21
    def test_parse_arguments(self):
        self.assertEqual({'mode': 'rn_life_is_rubbish'}, arguments.parse_arguments("?mode=rn_life_is_rubbish"))
        self.assertEqual({'a': '1', 'b': '2'}, arguments.parse_arguments("?a=1&b=2"))
        self.assertEqual({'url': 'white space'}, arguments.parse_arguments("?url=white+space"))  # urllib.quote_plus encodes ' ' as '+', amongst other things
Example #22
def show_tree():
    import locale
    try:
        locale.setlocale(locale.LC_ALL, locale.locale_alias['hu_hu'])
    except (KeyError, locale.Error):
        # fall back to the default locale if Hungarian is unavailable
        pass
    sorted_grapes = database.get_grapes(app.driver)
    sorted_grapes.sort(key=lambda grape: locale.strxfrm(grape.name))
    sorted_regions = database.get_wineregions(app.driver)
    sorted_regions.sort(key=lambda region: locale.strxfrm(region.name))
    sorted_subregions = database.get_winesubregions(app.driver)
    sorted_subregions.sort(key=lambda region: locale.strxfrm(region.name))
    return render_template('index.html',
                           regions=sorted_regions,
                           subregions=sorted_subregions,
                           grapes=sorted_grapes)


app.register_blueprint(default)
app.register_blueprint(language_blueprint)

if __name__ == "__main__":
    app.debug = True
    args = arguments.parse_arguments()
    app.bolt_url = args.bolt_url
    app.username = args.user
    app.password = args.password
    app.driver = Driver(app.bolt_url, app.username, app.password)
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
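Judging by the attributes read above (bolt_url, user, password), arguments.parse_arguments for this app is probably a small argparse wrapper along these lines; the defaults are assumptions:

import argparse

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('--bolt-url', dest='bolt_url',
                        default='bolt://localhost:7687')
    parser.add_argument('--user', default='neo4j')
    parser.add_argument('--password', required=True)
    return parser.parse_args()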
Example #23
import sys
import path

import config
import arguments

if __name__ == '__main__':
    arguments.parse_arguments(sys.argv)
Example #24
def main():
    model_args, training_args, inference_args = arguments.parse_arguments()
    test(model_args, training_args, inference_args)
Example #25
#!/usr/bin/python3
''' Main interface of Candida '''
from arguments import parse_arguments

# process the command-line input and the help flag
cli_arguments = parse_arguments()
print("*** Candida ***")

if cli_arguments.a is not None:
    print("* Searching for '%s' *" % cli_arguments.a)
else:
    print("Use -a to say what software you want to remove")
    print("Use -h for help")
Example #26
        timed_print(start, "end of mlsearch mpi-scheduler run")
        checkpoint.write_checkpoint(output_dir, 3)
    if (op.random_starting_trees + op.parsimony_starting_trees > 1):
        if (checkpoint_index < 4):
            raxml.select_best_ml_tree(msas, op)
            timed_print(start, "end of selecting the best ML tree")
            checkpoint.write_checkpoint(output_dir, 4)
    if (op.bootstraps != 0):
        if (checkpoint_index < 5):
            bootstraps.concatenate_bootstraps(output_dir, min(16, op.cores))
            timed_print(start, "end of bootstraps concatenation")
            checkpoint.write_checkpoint(output_dir, 5)
        if (checkpoint_index < 6):
            bootstraps.run(output_dir, raxml_library, op.scheduler,
                           os.path.join(output_dir, "supports_run"), op.cores,
                           op)
            timed_print(start, "end of supports mpi-scheduler run")
            checkpoint.write_checkpoint(output_dir, 6)
    return 0


save_cout = sys.stdout
print_header()
start = time.time()
ret = main_raxml_runner(arguments.parse_arguments())
end = time.time()
timed_print(start, "END OF THE RUN OF " + os.path.basename(__file__))
sys.stdout = save_cout
if (ret != 0):
    print("Something went wrong, please check the logs")
Example #27
def main():
    args = parse_arguments()
    GAN = model(args)
    GAN()
Example #28
    def test_four_clusters(self):
        """Four clusters on vertices of a square."""
        label_to_center = {
            'A': np.array([0.0, 0.0]),
            'B': np.array([0.0, 1.0]),
            'C': np.array([1.0, 0.0]),
            'D': np.array([1.0, 1.0]),
        }

        # generate training data
        train_cluster_id = (['A'] * 400 + ['B'] * 300 + ['C'] * 200 +
                            ['D'] * 100)
        random.shuffle(train_cluster_id)
        train_sequence = _generate_random_sequence(train_cluster_id,
                                                   label_to_center,
                                                   sigma=0.01)
        train_sequences = [
            train_sequence[:100, :], train_sequence[100:300, :],
            train_sequence[300:600, :], train_sequence[600:, :]
        ]
        train_cluster_ids = [
            train_cluster_id[:100], train_cluster_id[100:300],
            train_cluster_id[300:600], train_cluster_id[600:]
        ]

        # generate testing data
        test_cluster_id = ['A'] * 10 + ['B'] * 20 + ['C'] * 30 + ['D'] * 40
        random.shuffle(test_cluster_id)
        test_sequence = _generate_random_sequence(test_cluster_id,
                                                  label_to_center,
                                                  sigma=0.01)

        # construct model
        model_args, training_args, inference_args = arguments.parse_arguments()
        model_args.rnn_depth = 2
        model_args.rnn_hidden_size = 8
        model_args.observation_dim = 2
        model_args.verbosity = 3
        training_args.learning_rate = 0.01
        training_args.learning_rate_half_life = 50
        training_args.train_iteration = 200
        training_args.enforce_cluster_id_uniqueness = False
        inference_args.test_iteration = 2

        model = UISRNN(model_args)

        # run training, and save the model
        model.fit(train_sequences, train_cluster_ids, training_args)
        temp_file_path = tempfile.mktemp()
        model.save(temp_file_path)

        # run testing
        predicted_label = model.predict(test_sequence, inference_args)

        # run evaluation
        model.logger.print(
            3, 'Asserting the equivalence between'
            '\nGround truth: {}\nPredicted: {}'.format(test_cluster_id,
                                                       predicted_label))
        accuracy = evals.compute_sequence_match_accuracy(
            predicted_label, test_cluster_id)
        self.assertEqual(1.0, accuracy)

        # load new model
        loaded_model = UISRNN(model_args)
        loaded_model.load(temp_file_path)

        # run testing with loaded model
        predicted_label = loaded_model.predict(test_sequence, inference_args)

        # run evaluation with loaded model
        model.logger.print(
            3, 'Asserting the equivalence between'
            '\nGround truth: {}\nPredicted: {}'.format(test_cluster_id,
                                                       predicted_label))
        accuracy = evals.compute_sequence_match_accuracy(
            predicted_label, test_cluster_id)
        self.assertEqual(1.0, accuracy)

        # keep training from loaded model on a subset of training data
        transition_bias_1 = model.transition_bias
        training_args.learning_rate = 0.001
        training_args.train_iteration = 50
        model.fit(train_sequence[:100, :], train_cluster_id[:100],
                  training_args)
        transition_bias_2 = model.transition_bias
        self.assertNotAlmostEqual(transition_bias_1, transition_bias_2)
        model.logger.print(
            3, 'Asserting transition_bias changed from {} to {}'.format(
                transition_bias_1, transition_bias_2))

        # run evaluation
        model.logger.print(
            3, 'Asserting the equivalence between'
            '\nGround truth: {}\nPredicted: {}'.format(test_cluster_id,
                                                       predicted_label))
        accuracy = evals.compute_sequence_match_accuracy(
            predicted_label, test_cluster_id)
        self.assertEqual(1.0, accuracy)
Example #29
def main(wav_path, embedding_per_second=1.0, overlap_rate=0.5):

    # gpu configuration
    toolkits.initialize_GPU(args)
    params = {
        'dim': (257, None, 1),
        'nfft': 512,
        'spec_len': 250,
        'win_length': 400,
        'hop_length': 160,
        'n_classes': 5994,
        'sampling_rate': 16000,
        'normalize': True,
    }

    network_eval = spkModel.vggvox_resnet2d_icassp(
        input_dim=params['dim'],
        num_class=params['n_classes'],
        mode='eval',
        args=args)
    network_eval.load_weights(args.resume, by_name=True)

    model_args, _, inference_args = arguments.parse_arguments()
    model_args.observation_dim = 512
    diarization_Model = UISRNN(model_args)
    diarization_Model.load(SAVED_MODEL_NAME)
    specs, intervals = load_data(wav_path,
                                 embedding_per_second=embedding_per_second,
                                 overlap_rate=overlap_rate)
    mapTable, keys = genMap(intervals)
    feats = []

    for spec in specs:
        spec = np.expand_dims(np.expand_dims(spec, 0), -1)
        v = network_eval.predict(spec)
        feats += [v]

    feats = np.array(feats)[:, 0, :].astype(float)  # [splits, embedding dim]
    predicted_label = diarization_Model.predict(feats, inference_args)

    time_spec_rate = 1000 * (1.0 / embedding_per_second) * (
        1.0 - overlap_rate)  # milliseconds between consecutive speaker embeddings
    center_duration = int(1000 * (1.0 / embedding_per_second) // 2)
    speakerSlice = arrangeResult(predicted_label, time_spec_rate)

    for spk, timeDicts in speakerSlice.items():  # map times back to the original wav (which contains silence)
        for tid, timeDict in enumerate(timeDicts):
            s = 0
            e = 0
            for i, key in enumerate(keys):
                if (s != 0 and e != 0):
                    break
                if (s == 0 and key > timeDict['start']):
                    offset = timeDict['start'] - keys[i - 1]
                    s = mapTable[keys[i - 1]] + offset
                if (e == 0 and key > timeDict['stop']):
                    offset = timeDict['stop'] - keys[i - 1]
                    e = mapTable[keys[i - 1]] + offset

            speakerSlice[spk][tid]['start'] = s
            speakerSlice[spk][tid]['stop'] = e

    for spk, timeDicts in speakerSlice.items():
        print('========= ' + str(spk) + ' =========')
        for timeDict in timeDicts:
            s = timeDict['start']
            e = timeDict['stop']
            s = fmtTime(s)  # change point moves to the center of the slice
            e = fmtTime(e)
            print(s + ' ==> ' + e)

    p = PlotDiar(map=speakerSlice,
                 wav=wav_path,
                 gui=True,
                 pick=True,
                 size=(25, 6))
    p.draw()
    p.plot.show()
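fmtTime is a helper from the surrounding diarization demo. A sketch that matches its use above, assuming millisecond inputs and a minutes:seconds.milliseconds output format (both assumptions):

def fmtTime(ms):
    seconds, millis = divmod(int(ms), 1000)
    minutes, seconds = divmod(seconds, 60)
    return '%d:%02d.%03d' % (minutes, seconds, millis)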
Example #30
#!/usr/bin/python
# encoding: utf-8

#==================================
#@file name: train_file
#@author: Lixin Zou
#@contact: [email protected]
#@time:2019/2/7,12:38 AM
#==================================

import numpy as np
import ipdb
from arguments import parse_arguments, initialize
import utils
import os
import time
from train import run_all, evaluate

if __name__ == '__main__':
    np.random.seed(int(time.time()))
    config = parse_arguments()
    config = initialize(config)
    if config.task == "train":
        run_all(config).run()
    elif config.task == "evaluate":
        evaluate(config).run()
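The train/evaluate switch suggests parse_arguments exposes at least a --task option, roughly as below; the real arguments.py surely defines many more options, so treat this as an illustration:

import argparse

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', choices=['train', 'evaluate'],
                        default='train')
    return parser.parse_args()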