Example #1
def main():
    parse_args(config)
    set_logger(config, logger)
    if config['daemon']:
        daemon()
    pid_file = config['pidfile']
    mk_pid_file(pid_file)
    run_server(config)
Example #2
def conv_spatial():
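    """Spatial convergence study: tighten the tree tolerance by 10x per
    step while the time step and step count stay fixed, then run every
    configuration."""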
    mpi_num_procs, omp_num_threads = utils.parse_args()
    num_steps = 10
    T_END = 1.0
    ############################################################################
    # TEST 2: SPATIAL CONVERGENCE
    ############################################################################
    tl_factor = 0.1
    tl_init   = 1
    tl_list   = [tl_init*math.pow(tl_factor,float(cnt)) for cnt in range(0,num_steps)]

    dt_factor = 1
    dt_init   = T_END/2**20
    dt_list   = [dt_init*math.pow(dt_factor,float(cnt)) for cnt in range(0,num_steps)]

    tn_factor = 1.0/dt_factor
    tn_init   = 1
    tn_list   = [tn_init*math.pow(tn_factor,float(cnt)) for cnt in range(0,num_steps)]

    # NUM MPI PROCESSES
    np_list   = [mpi_num_procs  for cnt in range(0, num_steps)]

    # NUM OMP THREADS
    nt_list   = [omp_num_threads for cnt in range(0, num_steps)]

    cmd_args = generate_command_args(tl_list,\
                                     dt_list,\
                                     tn_list,\
                                     # de_list,\
                                     # q_list, \
                                     np_list,\
                                     nt_list,\
                                     num_steps)
    utils.execute_commands(cmd_args,'spatial')
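
The generate_command_args helper is not shown in these excerpts. A minimal sketch of what it presumably does (pair each step's parameter lists into one argument list per run) is given below; the flag names are hypothetical, not the real tbslas options:

def generate_command_args(tl_list, dt_list, tn_list, np_list, nt_list, num_steps):
    # Hypothetical reconstruction: one argument list per convergence step.
    cmd_args = {}
    for step in range(num_steps):
        cmd_args[step] = ['-tol', str(tl_list[step]),       # tree tolerance
                          '-dt',  str(dt_list[step]),       # time-step size
                          '-tn',  str(int(tn_list[step])),  # number of time steps
                          '-np',  str(np_list[step]),       # MPI processes
                          '-omp', str(nt_list[step])]       # OMP threads
    return cmd_args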
Example #3
def conv_temporal():
    mpi_num_procs, omp_num_threads = utils.parse_args()
    num_steps = 8
    ############################################################################
    # TEST 1: TEMPORAL CONVERGENCE
    ############################################################################
    # prog          = 'advdiff-ss'
    # prog          = 'advdiff-ss-tv'
    prog          = 'advdiff-ss-tv-extrap'
    vtk_save_rate = 0
    mrg_type      = 3
    np            = mpi_num_procs
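    # num_pnts below is the next power of 8 above the process count
    # (presumably so the initial points split evenly across octree leaves)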
    num_pnts      = 8**(math.floor(math.log(np,8)+1))
    nt            = omp_num_threads

    # TREE TOLERANCE
    tl_factor = 1#0.1
    tl_init   = 1e-5
    tl_list   = [tl_init*math.pow(tl_factor,float(cnt)) for cnt in range(0,num_steps)]

    # TIME RESOLUTION
    dt_factor = 0.5
    # dt_init   = 0.1/16#0.5**5
    dt_init   = 0.5**0
    dt_list   = [dt_init*math.pow(dt_factor,float(cnt)) for cnt in range(0,num_steps)]

    # NUM TIME STEPS
    T_END     = 1.0
    tn_factor = 1.0/dt_factor
    tn_init   = T_END/dt_init
    tn_list   = [tn_init*math.pow(tn_factor,float(cnt)) for cnt in range(0,num_steps)]

    dp_list = [15            for cnt in range(0,num_steps)]
    cq_list = [14            for cnt in range(0,num_steps)]
    ci_list = [True          for cnt in range(0,num_steps)]
    uf_list = [2             for cnt in range(0,num_steps)]
    pn_list = [num_pnts      for cnt in range(0,num_steps)]
    np_list = [np            for cnt in range(0,num_steps)]
    nt_list = [nt            for cnt in range(0,num_steps)]
    mg_list = [mrg_type      for cnt in range(0,num_steps)]
    vs_list = [vtk_save_rate for cnt in range(0,num_steps)]
    tt_list = [11            for cnt in range(0,num_steps)]

    cmd_args = OrderedDict()
    cmd_args = utils.generate_commands(
        prog,
        pn_list,
        tl_list,
        dp_list,
        cq_list,
        ci_list,
        uf_list,
        np_list,
        nt_list,
        dt_list,
        tn_list,
        vs_list,
        mg_list,
        tt_list)
    utils.execute_commands(cmd_args, 'temporal')
Example #4
    def _parse_args(self, args_str):
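        """Split args_str between this tool's own flags and the leftover
        argv, which is forwarded to the contrail api-server parser."""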
        parser = argparse.ArgumentParser()

        help="Path to contrail-api conf file, default /etc/contrail-api.conf"
        parser.add_argument(
            "--api-conf", help=help, default="/etc/contrail/contrail-api.conf")
        parser.add_argument(
            "--verbose", help="Run in verbose/INFO mode, default False",
            action='store_true', default=False)
        parser.add_argument(
            "--debug", help="Run in debug mode, default False",
            action='store_true', default=False)
        parser.add_argument(
            "--ifmap-servers",
            help="List of ifmap-ip:ifmap-port, default from api-conf")
        parser.add_argument(
            "--ifmap-credentials",
            help="<username>:<password> for read-only user",
            required=True)

        args_obj, remaining_argv = parser.parse_known_args(args_str.split())
        self._args = args_obj

        self._api_args = utils.parse_args('-c %s %s'
            %(self._args.api_conf, ' '.join(remaining_argv)))[0]
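
For a quick illustration of the parse_known_args split used above (a standalone sketch, not project code):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--ifmap-credentials", required=True)
known, remaining = parser.parse_known_args(
    "--ifmap-credentials user:pass --listen_port 8443".split())
# known.ifmap_credentials == "user:pass"
# remaining == ["--listen_port", "8443"], ready to be forwarded to the
# api-server's own parser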
Example #5
def conv_temporal_spatial():
    mpi_num_procs, omp_num_threads = utils.parse_args()
    prog = 'advection'
    num_steps = 8

    ############################################################################
    # TEST 3: TEMPORAL/SPATIAL ERROR
    ############################################################################
    tl_fact = 0.1
    tl_init = 1e-1
    tl_list = [tl_init*math.pow(tl_fact,float(cnt)) for cnt in range(0,num_steps)]

    dt_fact = 0.5
    dt_init = 1
    dt_list = [dt_init*math.pow(dt_fact,float(cnt)) for cnt in range(0,num_steps)]

    T_END   = 1.0  # assumed end time; T_END is not defined in this excerpt
    tn_fact = 1.0/dt_fact
    tn_list = [tn_init*math.pow(tn_fact,float(cnt)) for cnt in range(0,num_steps)]
    tn_list = [tn_init*math.pow(tn_fact,float(cnt)) for cnt in range(0,num_steps)]

    # NUM MPI PROCESSES
    np_list = [mpi_num_procs  for cnt in range(0, num_steps)]

    # NUM OMP THREADS
    nt_list = [omp_num_threads for cnt in range(0, num_steps)]
Example #6
 def test_parse_args__empty(self):
     """with empty args."""
     dummy_request = dummy({})
     result = utils.parse_args(dummy_request, {})
     self.assertTrue('results' in result)
     self.assertEqual(result['results'], {})
     self.assertTrue('errors' in result)
     self.assertEqual(result['errors'], [])
Example #7
 def test_parse_args__invalid(self):
     """with invalid args."""
     dummy_request = dummy({'price': 'Error Message', 'downpayment': 10})
     result = utils.parse_args(dummy_request, params)
     self.assertTrue('results' in result)
     self.assertTrue('price' not in result['results'])
     self.assertTrue('errors' in result)
     self.assertTrue('Error Message' in result['errors'][0])
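
Examples #6 and #7 rely on a dummy request factory and a params schema defined elsewhere in the test module. A hypothetical sketch of the request stand-in, for orientation only:

class dummy(object):
    # Hypothetical stand-in: a request whose arguments are a plain dict.
    def __init__(self, args):
        self.args = args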
Example #8
    def load(self, parser_args):
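        """Parse the CLI arguments, enable debug logging if requested, then
        build a Zabbix connection and a (zapi_function, args) job for every
        host given on the command line."""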
        try:
            # parse the supplied args with ConfigParser.parse_args
            self._args = self._parser.parse_args(parser_args)
        except IOError as e:
            log.error("Could not open file %s: %s" %
                      (e.filename, e.strerror))
            exit(1)

        if self._args.debug:
            log.setLevel(logging.DEBUG)
            log.debug("Debug enabled")
        self.METHOD_TYPE = getattr(self._args, 'type')
        self.METHOD = getattr(self._args, 'subparser_name')

        if self.METHOD not in ['help']:
            self.HOSTS = {}
            rets = {}
            for host in self._args.hosts:
                self.HOSTS[host] = Zabbix(
                    host,
                    self._args.uri_path,
                    self._args.user,
                    self._args.noverify,
                    self._args.cacert,
                    self._args.http,
                    self._args.timeout
                )
                zapi_function = getattr(
                    getattr(getattr(self.HOSTS[host], 'zapi'), self.METHOD_TYPE), self.METHOD)

                # If the listkeys argument was supplied, we need to override
                # args.arguments to pull one resource
                if getattr(self._args, 'listkeys', False):
                    self._args.arguments = ['output=extend', 'limit=1']
                # convert the arguments into the required format for the zapi
                # object.
                args_real = parse_args(self._args.arguments)

                # Parse specific cli arguments and update args_real
                args_to_parse = ['search', 'filter']
                for key in args_to_parse:
                    if getattr(self._args, key, None) is not None:
                        args_real[key] = parse_args(getattr(self._args, key))

                self.JOBS[host] = (zapi_function, args_real)
Example #9
def main():
    # Parse the JSON arguments
    config_args = parse_args()

    # Create the experiment directories
    _, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(config_args.experiment_dir)

    # Reset the default Tensorflow graph
    tf.reset_default_graph()

    # Tensorflow specific configuration
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Data loading
    # The batch size is equal to 1 when testing to simulate the real experiment.
    data_batch_size = config_args.batch_size if config_args.train_or_test == "train" else 1
    data = DataLoader(data_batch_size, config_args.shuffle)
    print("Loading Data...")
    config_args.img_height, config_args.img_width, config_args.num_channels, \
    config_args.train_data_size, config_args.test_data_size = data.load_data()
    print("Data loaded\n\n")

    # Model creation
    print("Building the model...")
    model = ShuffleNet(config_args)
    print("Model is built successfully\n\n")

    # Parameters visualization
    show_parameters()

    # Summarizer creation
    summarizer = Summarizer(sess, config_args.summary_dir)
    # Train class
    trainer = Train(sess, model, data, summarizer)

    if config_args.train_or_test == 'train':
        try:
            # print("FLOPs for batch size = " + str(config_args.batch_size) + "\n")
            # calculate_flops()
            print("Training...")
            trainer.train()
            print("Training Finished\n\n")
        except KeyboardInterrupt:
            trainer.save_model()

    elif config_args.train_or_test == 'test':
        # print("FLOPs for single inference \n")
        # calculate_flops()
        # This can be 'val' or 'test' or even 'train' according to the needs.
        print("Testing...")
        trainer.test('val')
        print("Testing Finished\n\n")

    else:
        raise ValueError("Train or Test options only are allowed")
Example #10
 def process_request(self, request):
     """Get input, return results."""
     results = parse_args(request, PARAMETERS)
     self.request = results['results']
     if len(results['errors']) > 0:
         self.errors = results['errors']
         self.status = 'Error'
     self._defaults()
     self._county_limit_data()
     return self._output()
Example #11
def conv_temporal_spatial_long_time():
    ############################################################################
    # TEST 2: CONVERGENCE TEST FOR ADVECTION
    ############################################################################
    mpi_num_procs, omp_num_threads = utils.parse_args()
    prog          = 'advection'
    dt            = 0.0628
    vsr           = 0
    mrg_type      = 3
    np            = mpi_num_procs
    num_pnts      = 8**(math.floor(math.log(np,8)+1))
    nt            = omp_num_threads

    # UNIFORM
    dp_list = [5    , 6    , 7    ]#, 5     , 6     , 7    ]
    cq_list = [3    , 3    , 3    ]#, 3     , 3     , 3    ]
    ci_list = [False , False , False ]#, False , False , False]
    uf_list = [2    , 2    , 2    ]#, 2     , 2     , 2    ]
    dt_list = [dt   , dt/2 , dt/4 ]#, dt    , dt/2  , dt/4 ]
    tn_list = [100  , 200  , 400  ]#, 100   , 200   , 400  ]
    num_steps = len(dp_list)
    tl_list = [1e-30         for cnt in range(0,num_steps)]

    # ADAPTIVE
    # tl_list = [1e-02, 1e-03, 1e-04, 1e-02 , 1e-03 , 1e-04]
    # dp_list = [15   , 15   , 15   , 15    , 15    , 15   ]
    # cq_list = [3    , 3    , 3    , 3     , 3     , 3    ]
    # ci_list = [True , True , True , False , False , False]
    # uf_list = [2    , 2    , 2    , 2     , 2     , 2    ]
    # dt_list = [dt   , dt/2 , dt/4 , dt    , dt/2  , dt/4 ]
    # tn_list = [100  , 200  , 400  , 100   , 200   , 400  ]

    num_steps = len(dp_list)
    pn_list = [num_pnts      for cnt in range(0,num_steps)]
    np_list = [np            for cnt in range(0,num_steps)]
    nt_list = [nt            for cnt in range(0,num_steps)]
    mg_list = [mrg_type      for cnt in range(0,num_steps)]
    vs_list = [vsr           for cnt in range(0,num_steps)]

    cmd_args = OrderedDict()
    cmd_args = utils.generate_commands(
        prog,
        pn_list,
        tl_list,
        dp_list,
        cq_list,
        ci_list,
        uf_list,
        np_list,
        nt_list,
        dt_list,
        tn_list,
        vs_list,
        mg_list)
    utils.execute_commands(cmd_args, 'temporal-spatial-long-time')
Example #12
    def process_request(self, request):
        """The main function which processes request and returns result
        back."""

        results = parse_args(request, PARAMETERS)
        self.request = results['results']
        if len(results['errors']) > 0:
            self.errors = results['errors']
            self.status = 'Error'
        self._defaults()
        self._data()
        return self._output()
Example #13
def test1():
    mpi_num_procs, omp_num_threads = utils.parse_args()
    ############################################################################
    # TEST 1: TEMPORAL/SPATIAL ERROR
    ############################################################################
    num_steps = 6

    # TREE TOLERANCE
    tl_factor = 1
    tl_init   = 1e-10
    tl_list = [tl_init*math.pow(tl_factor,float(cnt)) for cnt in range(0, num_steps)]

    # TREE DEPTH
    # de_factor = 0
    # de_init   = 5
    # de_list = [de_init+cnt*de_factor                  for cnt in range(0, num_steps)]
    de_list = [5,5,5,7,7,7]

    # TIME RESOLUTION
    dt_factor = 1
    dt_init   = 6.28*1e-2
    dt_list = [dt_init*math.pow(dt_factor,float(cnt)) for cnt in range(0, num_steps)]

    # NUM TIME STEPS
    NUM_ROT = 5
    T_END = 6.28*NUM_ROT
    tn_factor = 1
    tn_init   = T_END/dt_init
    tn_list = [tn_init*math.pow(tn_factor,float(cnt)) for cnt in range(0, num_steps)]

    # CHEBYSHEV DEGREE
    # q_factor  = 1;
    # q_init    = 2;
    # q_list  = [q_init*math.pow(q_factor,float(cnt))   for cnt in range(0, num_steps)]
    q_list = [3,8,14,3,8,14]

    # NUM MPI PROCESSES
    np_list = [mpi_num_procs  for cnt in range(0, num_steps)]

    # NUM OMP THREADS
    nt_list = [omp_num_threads for cnt in range(0, num_steps)]

    cmd_args = generate_command_args(tl_list,\
                                     dt_list,\
                                     tn_list,\
                                     de_list,\
                                     q_list, \
                                     np_list,\
                                     nt_list,\
                                     num_steps)

    utils.execute_commands(cmd_args,'table1-ROT'+str(NUM_ROT))
Example #14
def scals():
    mpi_num_procs, omp_num_threads = utils.parse_args()
    ############################################################################
    # TEST 1: STRONG SCALING
    ############################################################################
    # prog  = 'advection'
    prog  = 'advdiff-ss'
    np_list = [
             1,
             2,
             4,
             8,
             # 16,
             # 32,
             # 64,
            ]
    mrg_type = 3
    max_np        = max(np_list)
    num_pnts      = 8**3#(math.floor(math.log(max_np,8)+1))

    num_steps = len(np_list)
    pn_list = [num_pnts        for cnt in range(0,num_steps)]
    tl_list = [1e-0            for cnt in range(0,num_steps)]
    dp_list = [15              for cnt in range(0,num_steps)]
    cq_list = [14              for cnt in range(0,num_steps)]
    ci_list = [True            for cnt in range(0,num_steps)]
    uf_list = [4               for cnt in range(0,num_steps)]
    nt_list = [omp_num_threads for cnt in range(0,num_steps)]
    dt_list = [0.125           for cnt in range(0,num_steps)]
    tn_list = [1               for cnt in range(0,num_steps)]
    vs_list = [0               for cnt in range(0,num_steps)]
    mg_list = [mrg_type        for cnt in range(0,num_steps)]

    cmd_args = OrderedDict()
    cmd_args = utils.generate_commands(
        prog,
        pn_list,
        tl_list,
        dp_list,
        cq_list,
        ci_list,
        uf_list,
        np_list,
        nt_list,
        dt_list,
        tn_list,
        vs_list,
        mg_list)
    utils.execute_commands(cmd_args, 'sscal'+prog)
Example #15
def main():
    """
    main function to start the application
    """
    # parse the parameters
    params = parse_args(sys.argv[1:])
    if not params:
        return

    # create the object and pass the params required
    recommend = Recommendation(params.get("input_movies"),
                               params.get("file_path"),
                               params.get("show_count"))
    # get the recommended movies based on the provided data
    top_movies = recommend.get_recommended_movies()
    # print the movies to the user's console
    show_remommend_movies(top_movies, recommend.get_movies_dict())

    return 0
Example #16
def omp():
    ############################################################################
    # TEST: OMP SCALING
    ############################################################################
    mpi_num_procs, omp_num_threads = utils.parse_args()
    # prog     = 'advection'
    prog     = 'advdiff-ss'
    dt       = 0.0628
    vsr      = 0
    mrg_type = 3
    num_pnts = 8**(math.floor(math.log(mpi_num_procs,8)+1))

    num_steps = omp_num_threads
    nt_list = [cnt+1    for cnt in range(0,num_steps)]
    np_list = [mpi_num_procs for cnt in range(0,num_steps)]

    tn_list = [10       for cnt in range(0,num_steps)]
    pn_list = [num_pnts for cnt in range(0,num_steps)]
    tl_list = [1e-30    for cnt in range(0,num_steps)]
    dp_list = [4        for cnt in range(0,num_steps)]
    mg_list = [mrg_type for cnt in range(0,num_steps)]
    vs_list = [vsr      for cnt in range(0,num_steps)]
    cq_list = [14       for cnt in range(0,num_steps)]
    ci_list = [True     for cnt in range(0,num_steps)]
    uf_list = [4        for cnt in range(0,num_steps)]
    dt_list = [dt       for cnt in range(0,num_steps)]

    cmd_args = OrderedDict()
    cmd_args = utils.generate_commands(
        prog,
        pn_list,
        tl_list,
        dp_list,
        cq_list,
        ci_list,
        uf_list,
        np_list,
        nt_list,
        dt_list,
        tn_list,
        vs_list,
        mg_list)
    utils.execute_commands(cmd_args, 'omp'+prog)
Example #17
File: imb.py Project: arashb/tbslas
def test2():
    ############################################################################
    # TEST 2: V depth: [6] C depth: [5, 7, 9] config: regular V, irregular C
    ############################################################################
    mpi_num_procs, omp_num_threads = utils.parse_args()
    num_steps = 3

    mt_factor = 1
    mt_init = 1
    mt_list = [mt_init+cnt*mt_factor                  for cnt in range(0, num_steps)]

    de_factor = 0
    de_init   = 5
    de_list = [de_init+cnt*de_factor                  for cnt in range(0, num_steps)]

    dt_factor = 1
    dt_init   = 0.25
    dt_list = [dt_init*math.pow(dt_factor,float(cnt)) for cnt in range(0,num_steps)]

    tn_factor = 1.0
    tn_init   = 1
    tn_list = [tn_init*math.pow(tn_factor,float(cnt)) for cnt in range(0,num_steps)]

    test_init = 5
    test_factor = 0
    test_list = [test_init+cnt*test_factor            for cnt in range(0,num_steps)]

    # NUM MPI PROCESSES
    np_list = [mpi_num_procs  for cnt in range(0, num_steps)]

    # NUM OMP THREADS
    nt_list = [omp_num_threads for cnt in range(0, num_steps)]

    cmd_args = generate_command_args(de_list,
                                     dt_list,
                                     tn_list,
                                     test_list,
                                     mt_list,
                                     np_list,
                                     nt_list,
                                     num_steps)
    utils.execute_commands(cmd_args, 'test2')
Example #18
def test1():
    mpi_num_procs, omp_num_threads = utils.parse_args()
    num_steps = 8
    T_END     = 2*math.pi
    ############################################################################
    # TEST 1: TEMPORAL ERROR
    ############################################################################
    tl_fact = 1#0.1
    tl_init = 1e-5
    tl_list = [tl_init*math.pow(tl_fact,float(cnt)) for cnt in range(0,num_steps)]

    tn_fact = 2
    tn_init = 50
    tn_list = [tn_init*math.pow(tn_fact,float(cnt)) for cnt in range(0,num_steps)]

    dt_fact = 0.5
    dt_init = T_END/tn_init
    dt_list = [dt_init*math.pow(dt_fact,float(cnt)) for cnt in range(0,num_steps)]


    # NUM MPI PROCESSES
    np_list = [mpi_num_procs  for cnt in range(0, num_steps)]

    # NUM OMP THREADS
    nt_list = [omp_num_threads for cnt in range(0, num_steps)]

    cmd_args = generate_command_args(tl_list,\
                                     dt_list,\
                                     tn_list,\
                                     # de_list,\
                                     # q_list, \
                                     np_list,\
                                     nt_list,\
                                     num_steps)

    utils.execute_commands(cmd_args, 'table1')
Example #19
#!/usr/bin/python

from cmd import cmd_runner
from ui import *
from utils import l, params, parse_args, setup_logging
import sys

class deployer:
    def __init__(self):
        self.cmd = cmd_runner(ui())

try:
    parse_args() # provides global configuration repository 'params'
except Exception as e:
    print("Error parsing command line: " + str(e))
    sys.exit(1)

# setup logging
try:
    setup_logging()
except Exception as e:
    print("Error starting logger: %s" % str(e))
    sys.exit(1)

d = deployer()
d.cmd.run()
Example #20
import os
from subprocess import Popen, PIPE

import utils
#from utils import preprocess
from save_figures import *
from apply_model import apply_model_single_input
from pad import pad_image
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from losses import *

os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'

if __name__ == "__main__":

    ######################## COMMAND LINE ARGUMENTS ########################
    results = utils.parse_args("multiseg")
    num_channels = results.num_channels

    NUM_GPUS = 1
    if results.GPUID is None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    elif results.GPUID == -1:
        # find maximum number of available GPUs
        call = "nvidia-smi --list-gpus"
        pipe = Popen(call, shell=True, stdout=PIPE).stdout
        available_gpus = pipe.read().decode().splitlines()
        NUM_GPUS = len(available_gpus)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)

    model_filename = results.weights
Example #21
        scores_window.append(score)  # save most recent score
        scores.append(score)  # save most recent score
        avg_av = agent.evaluate_on_fixed_set(fixed_states)
        average_action_values.append(avg_av)

        print(f'Episode {i_episode}\tAverage Score: '
              f'{round(np.mean(scores_window),4)}\tEpsilon: {round(eps, 4)}\t'
              f'Average Q value: {round(avg_av, 4)}')

        if i_episode % conf['save_every'] == 0 and i_episode > 0:
            print(f'Saving model at iteration: {i_episode}')
            save_model(conf, agent)

    env.close()

    return {
        'scores': scores,
        'epsilons': epsilons,
        'avg_action_values': average_action_values
    }


if __name__ == '__main__':

    arguments = parse_args()
    pc = arguments.path_config
    exp_conf = read_yaml(pc)

    stats = train(exp_conf)
    save_scores(exp_conf, stats)
Example #22
def conv_spatial():
    ############################################################################
    # SPATIAL CONVERGENCE TEST FOR ADVECTION
    ############################################################################
    mpi_num_procs, omp_num_threads = utils.parse_args()
    prog = 'advection'
    num_steps = 8

    ##############################
    # TREE TOLERANCE
    ##############################
    tl_fact = 0.1
    tl_init = 1e-1
    tl_list = [tl_init*math.pow(tl_fact,float(cnt)) for cnt in range(0,num_steps)]

    ##############################
    # TIME RESOLUTION
    ##############################
    dt_fact = 1
    dt_init = 1e-3
    dt_list = [dt_init*math.pow(dt_fact,float(cnt)) for cnt in range(0,num_steps)]

    T_END   = 1.0
    tn_fact = 1.0/dt_fact
    tn_init = 1#T_END/dt_init
    tn_list = [tn_init*math.pow(tn_fact,float(cnt)) for cnt in range(0,num_steps)]

    ##############################
    # TREE DEPTH/POINTS
    ##############################
    dp_list  = [15 for cnt in range(0,num_steps)]

    num_pnts = 8**(math.floor(math.log(mpi_num_procs,8)+1))
    pn_list  = [num_pnts      for cnt in range(0,num_steps)]

    ##############################
    # PARALLEL
    ##############################
    np_list = [mpi_num_procs for cnt in range(0,num_steps)]

    nt = omp_num_threads
    nt_list = [nt for cnt in range(0,num_steps)]

    mrg_type = 3
    mg_list = [mrg_type for cnt in range(0,num_steps)]

    ##############################
    # CHEBYSHEV/CUBIC INTERPOLATION
    ##############################
    cq_list = [14   for cnt in range(0, num_steps)]
    ci_list = [True for cnt in range(0, num_steps)]
    uf_list = [4    for cnt in range(0, num_steps)]

    ##############################
    # VISUALIZATION
    ##############################
    vtk_save_rate = 0
    vs_list = [vtk_save_rate for cnt in range(0,num_steps)]

    cmd_args = OrderedDict()
    cmd_args = utils.generate_commands(
        prog,
        pn_list,
        tl_list,
        dp_list,
        cq_list,
        ci_list,
        uf_list,
        np_list,
        nt_list,
        dt_list,
        tn_list,
        vs_list,
        mg_list)
    utils.execute_commands(cmd_args, 'spatial')
Example #23
from torchvision import transforms
import torchvision.datasets as datasets

from networks import ilsvrc12_archs as archs
from networks import weights_init
from netslim import prune, load_pruned_model, update_bn, update_bn_by_names, \
    network_slimming, get_norm_layer_names
from utils import parse_cifar_args as parse_args
from utils import AverageMeter

import os

import torch
import torch.backends.cudnn as cudnn
cudnn.benchmark = True

if __name__ == "__main__":
    # Training settings
    args = parse_args("ilsvrc12")
    print(args)
    os.system('mkdir -p {}'.format(args.output))
    device = torch.device('cuda' if args.cuda else 'cpu')

    if args.seed > 0:
        torch.manual_seed(args.seed)
        if args.cuda:
            torch.cuda.manual_seed(args.seed)

    # Make data loader
    kwargs = {'num_workers': 10, 'pin_memory': False} if args.cuda else {}
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    traindir = os.path.join(args.data_path, 'train')
    train_dataset = datasets.ImageFolder(
Example #24
def test_valid_parse_args_address(address_fixture, default_port_fixture):
    """Compare passed address and default port with returned address and port."""
    parser = parse_args(address_fixture)
    assert parser.address == address_fixture[1] and parser.port == default_port_fixture
Example #25
def test_valid_parse_args_port(default_address_fixture, port_fixture):
    """Compare default address and passed port with returned address and port."""
    parser = parse_args(port_fixture)
    assert parser.address == default_address_fixture and parser.port == int(port_fixture[1])
Example #26
import os

import torch

import torchvision
import utils

if __name__ == "__main__":
    args = utils.parse_args()
    test = utils.load_test(args)
    model = getattr(torchvision.models, test["model"])()
    model.eval()

    example = torch.randn(1, 3, 64, 64)
    script_model = torch.jit.trace(model, example)

    script_model.save(os.environ["MODEL"])
Example #27
import os
import pickle

import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
import numpy as np
import pandas as pd
from scipy.special import softmax

from tqdm import tqdm
from utils import wln_loss, regio_acc, lr_multiply_ratio, parse_args

args, dataloader, classifier = parse_args(cross_val=True)
reactivity_data = pd.read_csv(args.data_path, index_col=0)

if args.model == 'ml_QM_GNN':
    from ml_QM_GNN.graph_utils.mol_graph import initialize_qm_descriptors
    from predict_desc.predict_desc import predict_desc, reaction_to_reactants
    from predict_desc.post_process import min_max_normalize
    qmdf = predict_desc(args, normalize=False)
else:
    if args.model == 'QM_GNN':
        from QM_GNN.graph_utils.mol_graph import initialize_qm_descriptors
        initialize_qm_descriptors(path=args.desc_path)

df = pd.read_csv(args.data_path, index_col=0)
df = df.sample(frac=1, random_state=1)

# split df into k_fold groups
k_fold_arange = np.linspace(0, len(df), args.k_fold+1).astype(int)
Example #28
                            epoch) + "_" + str(bs) + "_" + str(
                                hidden_dims) + "_" + str(lr) + "_" + str(
                                    wd) + "_" + str(loss_weights) + "_" + str(
                                        do)
                        sys.stdout = open(
                            os.path.join(LOG_FOLDER, "MAE_" + PATH + ".out"),
                            "w")
                        CV(mod1, mod2, encoder1, encoder2, x_train, 5, in_dims,
                           d_dims, hidden_dims, lr, wd, bs, epoch, loss_fns,
                           loss_weights)

                        p_time = time.time()
                        model = train_MAE(mod1, mod2, in_dims, d_dims,
                                          encoder1, encoder2, x_train,
                                          hidden_dims, lr, wd, bs, epoch,
                                          loss_fns, loss_weights, True, PATH)
                        a_time = time.time()
                        print(f'Training time: {a_time-p_time}')
                        testloss = evaluate_MAE(model, x_test, bs, loss_fns,
                                                loss_weights)
                        print(f"Testloss: {testloss}")
                        gc.collect()
                        sys.stdout.close()
                        sys.stdout = stdOrigin


if __name__ == '__main__':
    print(sys.argv)
    parameters = parse_args(sys.argv)
    main(*parameters)
Example #29
import math
import argparse
import model
import utils
import data
import os

import tensorflow as tf

from utils import Option
opt = Option('./config.json')

utils.init()

formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=formatter)

args, flags = utils.parse_args(opt, parser)

tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.set_random_seed(args['random_seed'])

def rampup(epoch):
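    # Gaussian ramp-up (as in temporal ensembling): grows smoothly from
    # exp(-5) ~= 0.007 at epoch 0 to 1.0 once epoch reaches rampup_length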
    if epoch < args['rampup_length']:
        p = max(0.0, float(epoch)) / float(args['rampup_length'])
        p = 1.0 - p
        return math.exp(-p * p * 5.0)
    else:
        return 1.0

def rampdown(epoch):
    if epoch >= (args['n_epochs'] - args['rampdown_length']):
        ep = (epoch - (args['n_epochs'] - args['rampdown_length'])) * 0.5
Example #30
    return Context(args=args, trader=None)


if __name__ == '__main__':
    commonParser = argparse.ArgumentParser(add_help=False)
    commonParser.add_argument('--csv', type=str, help='read csv file')
    commonParser.add_argument('--reverse', action=argparse.BooleanOptionalAction, default=False,
                              help='reverse csv data')
    commonParser.add_argument('--csv-delimiter', default=',', type=str, help='csv delimiter', dest='csvDelimiter')

    strategyParser = argparse.ArgumentParser(description='Start back test.', parents=[commonParser])
    strategyParser.add_argument('--strategy', dest='strategy', type=str, nargs='?',
                                help='choose a strategy to test')

    args, unknown = strategyParser.parse_known_args()
    strategy_args = {parse_args(k): v for k, v in zip(unknown[::2], unknown[1::2])}
    logger.info(f'Test [{args.strategy}] strategy with args:')
    print_json(strategy_args)

    strategy = __import__(f'strategy.{args.strategy}', fromlist=True)

    context = build_context(strategy_args)
    instance = getattr(strategy, 'create')(context)

    cerebro = bt.Cerebro(stdstats=False)
    cerebro.addstrategy(Proxy, args={
        'strategy': instance
    })
    datapath = args.csv

    dataframe = pd.read_csv(
Example #31
import torch
from torch.utils.data import Dataset
import main, utils
import torch.optim as optim
import sys
import numpy as np
import os.path as osp
from datetime import date
import train

import pdb

device = 'cuda' if torch.cuda.is_available() else 'cpu'

if __name__ == '__main__':

    opt = utils.parse_args()

    if True:
        if opt.glove:
            queryset = utils.load_glove_data('query').to(utils.device)
            neighbors = utils.load_glove_data('answers').to(utils.device)
        elif opt.sift:
            queryset = utils.load_sift_data('query').to(utils.device)
            neighbors = utils.load_sift_data('answers').to(utils.device)
        else:
            queryset = utils.load_data('query').to(utils.device)
            neighbors = utils.load_data('answers').to(utils.device)
    else:
        queryset = utils.load_data('train').to(utils.device)
        dist = utils.l2_dist(queryset)
        dist += 2 * torch.max(dist).item() * torch.eye(
Example #32
import numpy as np

from convert import trajectory_ik, trajectory_fk, timing_report, reaching_report
from approxik import ApproxInvKin, ExactInvKin


def line1(P, turns=5, max_radius=1.0, height=0.8):
    n_steps = P.shape[0]
    x = np.linspace(-np.pi * turns, np.pi, n_steps)
    r = np.linspace(0, max_radius, n_steps)
    P[:, 0] = np.sin(x) * r
    P[:, 1] = np.cos(x) * r
    P[:, 2] = height
    P[:, 3] = 1.0


if __name__ == "__main__":
    filename, base_link, ee_link = parse_args()

    aik = ApproxInvKin(filename, base_link, ee_link, 1.0, 0.001, verbose=0)
    eik = ExactInvKin(filename, base_link, ee_link, 1e-4, 200, verbose=0)

    P = np.zeros((1000, 7))
    line1(P, max_radius=0.6)

    Qa, timings = trajectory_ik(P, aik)
    reaching_report(P, Qa, aik, label="Approximate IK")
    timing_report(timings, "Approximate IK")
    Pa = trajectory_fk(Qa, aik)
    Qe, timings, reachable = trajectory_ik(P, eik, return_reachable=True)
    timing_report(timings, "Exact IK")
    reaching_report(P, Qe, eik, label="Exact IK")
    Pe = trajectory_fk(Qe, eik)
Example #33
"""
Compete with best model
"""
import os

from go_game import GoGame
from NNet import NetTrainer
from play import compete_random_greedy
from utils import parse_args

if __name__ == "__main__":
    ARGS = parse_args()
    GAME = GoGame(13, 7.5)

    OLD_WIN_COUNT, NEW_WIN_COUNT, black_win, white_win = compete_random_greedy(
        GAME, ARGS)

    # if not os.path.exists('../compete_results'):
    # os.makedirs('../compete_results')

    print('Random {} Greedy {}'.format(OLD_WIN_COUNT, NEW_WIN_COUNT),
          flush=True)

    # with open('../compete_results/' + str(ARGS.thread_num) + '.txt', 'w') as file:
    # file.write(str(OLD_WIN_COUNT) + ' ' + str(NEW_WIN_COUNT)+' '+ str(black_win) +' '+ str(white_win))
Example #34
def main():
    args = parse_args()

    # specifies the path where the biobert or clinical bert model is saved
    if args.bert_model == 'biobert' or args.bert_model == 'clinical_bert':
        args.bert_model = args.model_loc

    print(f"Using bert model: {args.bert_model}")

    device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info(f"device: {device} n_gpu: {n_gpu}")

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir) and args.do_train:
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    processor = N2c2ClsProcessor(args.fold_id)
    num_labels = 13
    label_list = processor.get_labels()

    tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                              do_lower_case=args.do_lower_case)

    print('TRAIN')
    train = processor.get_train_examples(args.data_dir)
    print([(train[i].text_a, train[i].text_b, train[i].label)
           for i in range(3)])
    print('DEV')
    dev = processor.get_dev_examples(args.data_dir)
    print([(dev[i].text_a, dev[i].text_b, dev[i].label) for i in range(3)])
    print('TEST')
    test = processor.get_test_examples(args.data_dir)
    print([(test[i].text_a, test[i].text_b, test[i].label) for i in range(3)])

    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) /
            args.train_batch_size) * args.num_train_epochs

    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else PYTORCH_PRETRAINED_BERT_CACHE
    model = BertForSequenceClassification.from_pretrained(
        args.bert_model, cache_dir=cache_dir, num_labels=num_labels)
    model.to(device)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)

    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        train_features = convert_examples_to_features(train_examples,
                                                      label_list,
                                                      args.max_seq_length,
                                                      tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_label_ids)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)

                loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1

                optimizer.step()
                optimizer.zero_grad()
                global_step += 1

        # Save a trained model and the associated configuration
        model_to_save = model.module if hasattr(
            model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        with open(output_config_file, 'w') as f:
            f.write(model_to_save.config.to_json_string())

        # Load a trained model and config that you have fine-tuned
        config = BertConfig(output_config_file)
        model = BertForSequenceClassification(config, num_labels=num_labels)
        model.load_state_dict(torch.load(output_model_file))
    else:
        model = BertForSequenceClassification.from_pretrained(
            args.bert_model, num_labels=num_labels)
    model.to(device)

    if args.do_eval:
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(eval_examples, label_list,
                                                     args.max_seq_length,
                                                     tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features],
                                     dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        pred = []

        for input_ids, input_mask, segment_ids, label_ids in tqdm(
                eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss = model(input_ids, segment_ids, input_mask,
                                      label_ids)
                logits = model(input_ids, segment_ids, input_mask)
                logits = torch.softmax(logits, 1)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(logits, label_ids)
            pred += logits.tolist()

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        loss = tr_loss / nb_tr_steps if args.do_train else None

        pred = {f.guid: p for f, p in zip(eval_features, pred)}

        result = {
            'eval_loss': eval_loss,
            'eval_accuracy': eval_accuracy,
            'global_step': global_step,
            'loss': loss
        }

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        output_pred_file = os.path.join(args.output_dir, "pred_results.txt")
        with open(output_pred_file, 'w') as writer:
            logger.info("***** Writing Eval predictions *****")
            for id, p in pred.items():
                writer.write(f"{id}:{p}\n")

    if args.do_test and (args.local_rank == -1
                         or torch.distributed.get_rank() == 0):
        test_examples = processor.get_test_examples(args.data_dir)
        test_features = convert_examples_to_features(test_examples, label_list,
                                                     args.max_seq_length,
                                                     tokenizer)
        logger.info("***** Running testing *****")
        logger.info("  Num examples = %d", len(test_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in test_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in test_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in test_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in test_features],
                                     dtype=torch.long)
        test_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        # Run prediction for full data
        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data,
                                     sampler=test_sampler,
                                     batch_size=args.eval_batch_size)

        model.eval()
        test_loss, test_accuracy = 0, 0
        nb_test_steps, nb_test_examples = 0, 0

        for input_ids, input_mask, segment_ids, label_ids in tqdm(
                test_dataloader, desc="Testing"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_test_loss = model(input_ids, segment_ids, input_mask,
                                      label_ids)
                logits = model(input_ids, segment_ids, input_mask)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_test_accuracy = accuracy(logits, label_ids)

            test_loss += tmp_test_loss.mean().item()
            test_accuracy += tmp_test_accuracy

            nb_test_examples += input_ids.size(0)
            nb_test_steps += 1

        test_loss = test_loss / nb_test_steps
        test_accuracy = test_accuracy / nb_test_examples
        loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {
            'test_loss': test_loss,
            'test_accuracy': test_accuracy,
            'global_step': global_step,
            'loss': loss
        }

        output_test_file = os.path.join(args.output_dir, "test_results.txt")
        with open(output_test_file, "w") as writer:
            logger.info("***** Test results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
Example #35
def main():

    args = parse_args()

    course_link = args.course_url[0]
    path = args.path
    overwrite = args.overwrite

    regexs = [
        r"(?:https?://)study.163.com/course/introduction/(?P<courseid>\d+)\.htm",
        r"(?:https?://)study.163.com/course/courseMain.htm\?courseId=(?P<courseid>\d+)",
        r"(?:https?://)study.163.com/course/introduction.htm\?courseId=(?P<courseid>\d+)",
    ]

    for regex in regexs:
        m = re.match(regex, course_link)
        if m is not None:
            break

    if m is None:
        print("The URL provided is not valid for study.163.com")
        sys.exit(0)

    path = os.path.join(path, m.group("courseid"))
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, sdch",
        "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4",
        "Connection": "keep-alive",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
        "X-Requested-With": "ShockwaveFlash/16.0.0.235",
    }

    post_data = {
        "callCount": 1,
        "scriptSessionId": "${scriptSessionId}190",
        "c0-scriptName": "PlanNewBean",
        "c0-methodName": "getPlanCourseDetail",
        "c0-id": 0,
        "c0-param0": "string:" + m.group("courseid"),
        "c0-param1": "number:0",
        "c0-param2": "null:null",
        "batchId": 434820,  # arbitrarily
    }
    course_detail_dwr_url = "http://study.163.com/dwr/call/plaincall/PlanNewBean.getPlanCourseDetail.dwr"

    session = requests.Session()
    session.headers.update(headers)
    r = session.post(course_detail_dwr_url, data=post_data)

    if r.status_code != 200:
        print("Failed to get .dwr file.")
        sys.exit(0)

    print("Parsing...", end="")

    syllabus = parse_syllabus_study163(session, r.content)

    if syllabus:
        print("Done.")
    else:
        print("Failed. No course content on the page.")
        sys.exit(0)

    download_syllabus_study163(session, syllabus, path, overwrite)
Example #36
            elif params.n_shot == 5:
                params.stop_epoch = 400
            else:
                params.stop_epoch = 600  # default

    base_datamgr = SimpleDataManager(image_size, batch_size=16)
    base_loader = base_datamgr.get_data_loader(base_file, aug=params.train_aug)
    val_datamgr = SimpleDataManager(image_size, batch_size=64)
    val_loader = val_datamgr.get_data_loader(val_file, aug=False)

    return base_loader, val_loader


if __name__ == '__main__':
    np.random.seed(10)
    params = parse_args('train')

    params.checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (
        configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        params.checkpoint_dir += '_aug'
    if params.method not in ['baseline', 'baseline++']:
        params.checkpoint_dir += '_%dway_%dshot' % (params.train_n_way,
                                                    params.n_shot)

    if not os.path.isdir(params.checkpoint_dir):
        os.makedirs(params.checkpoint_dir)

    base_loader, val_loader = get_loader(params)
    model = get_model(params)
    model = model.cuda()
Example #37

def transform(src: Path, dst: PathLike):
    # Import the Universal Sentence Encoder's TF Hub module
    embed = hub.Module(MODULE_URL)
    tf.logging.set_verbosity(tf.logging.ERROR)
    logging.debug('Transforming')
    test_corpus = read_corpus(src)
    with tf.Session() as session:
        session.run(
            [tf.global_variables_initializer(),
             tf.tables_initializer()])
        x = session.run(embed(test_corpus))
    logging.debug('Save to %s', dst)
    np.savez(dst, x=x)


def read_universal_sentence(filename):
    npzfile = np.load(filename)
    return npzfile['x']


if __name__ == '__main__':
    argv = parse_args(__doc__)

    # pick_device()

    output = to_path(argv['--output'])
    if not argv['--skip'] or not output.exists():
        transform(to_path(argv['--input']), output)
Example #38
    get_logger,
    write_lines,
    read_lines,
)

# TODO only do parts of pipeline
opts = parse_args(
    arg('-name', default='fr-clean'),
    arg('-f', '--file', default='train/french_clean'),
    # if you want 18th word do 17
    arg('-i', '--start', type=int, default=76),
    arg('-j', '--stop', type=int),
    arg('-q', '--query', nargs='*'),
    arg('-s', '--src', default='fr'),
    arg('-t', '--target', default='en'),
    arg('-n', '--n-img', type=int, default=20),

    # pipeline
    arg('-is', '--sch', action='store_true'),
    arg('-rs', '--rsch', action='store_true'),
    arg('-pred', action='store_true'),

    # use saved
    arg('-load-urls'),
    arg('-load-preds'),
)

name = opts.name + '__' if is_(opts.name) else ''
RESULT_PREFIX = osp.join('reverse-img-final-preds',
                         '%s_to_%s' % (opts.src, opts.target),
                         name + time_stamp())
mkdir_p(RESULT_PREFIX)
Example #39
def test_valid_parse_args(address_fixture, port_fixture):
    """Compare passed address and port with returned address and port."""
    parser = parse_args((*address_fixture, *port_fixture))
    assert parser.address == address_fixture[1] and parser.port == int(port_fixture[1])
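
These parametrized tests assume pytest fixtures yielding ("--flag", value) pairs plus the parser's defaults. A hypothetical sketch of such fixtures (the concrete values are illustrative only):

import pytest

@pytest.fixture
def address_fixture():
    return ("--address", "127.0.0.1")  # flag/value pair fed to parse_args

@pytest.fixture
def port_fixture():
    return ("--port", "8080")          # port arrives as a string

@pytest.fixture
def default_address_fixture():
    return "0.0.0.0"                   # assumed parser default for --address

@pytest.fixture
def default_port_fixture():
    return 8000                        # assumed parser default for --port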
Example #40
def main():
    args = parse_args()

    if args.username is None:
        print ('No username specified.')
        sys.exit(1)
    if args.password is None:
        print ('No password specified.')
        sys.exit(1)

    user_email = args.username
    user_pswd = args.password
    course_link = args.course_url[0]
    path = args.path
    overwrite = args.overwrite

    regex = r'(?:https?://)(?P<site>[^/]+)/(?P<baseurl>[^/]+)/(?P<coursename>[^/]+)/?'
    m = re.match(regex, args.course_url[0]) 
    if m is None:
        print ('The URL provided is not valid for icourse163.')
        sys.exit(0)

    md = md5.new()
    md.update(user_pswd)
    encryptedpswd = md.hexdigest()

    if m.group('site') in ['www.icourse163.org']:
        login_data = {
                'product': 'imooc',
                'url': 'http://www.icourse163.org/mooc.htm?#/index',
                'savelogin': 1,
                'domains': 'icourse163.org',
                'type': 0,
                'append': 1,
                'username': user_email,
                'password': encryptedpswd
                }
        login_success_flag = '正在登录,请稍等...'
        web_host = 'www.icourse163.org'
        regex_loc = 'window.location.replace\(\"(http:\/\/reg\.icourse163\.org\/next\.jsp.+)\"\)'
    elif m.group('site') in [ 'mooc.study.163.com']:
        login_data = {
                'product': 'study',
                'url': 'http://study.163.com?from=study',
                'savelogin': 1,
                'domains': '163.com',
                'type': 0,
                'append': 1,
                'username': user_email,
                'password': encryptedpswd
                }        
        login_success_flag = '登录成功,正在跳转'
        web_host = 'mooc.study.163.com'
        regex_loc = 'window.location.replace\(\"(http:\/\/study\.163\.com\?from=study)\"\)'
    else:
        print ('The URL provided is not valid for icourse163.')
        sys.exit(0)
    path = os.path.join(path, clean_filename(m.group('coursename')))

    login_url = 'https://reg.163.com/logins.jsp'

    headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
                'Connection': 'keep-alive',
               }


    session = requests.Session()
    session.headers.update(headers)
    r1 = session.post(login_url, data=login_data)

    
    success = re.search(login_success_flag, r1.content)
    if not success:
        print ('Fail to login.')
        exit(2)
    else:
        print ('Login done...')
    
    se = re.search(regex_loc, r1.content)
        
    r = session.get(se.group(1), allow_redirects=True, cookies = {'NTES_PASSPORT':session.cookies['NTES_PASSPORT']})

    # get course id, it's in cid.group(1)
    r2 = session.get(course_link)
    cid = re.search(r'window\.termDto = {             id:([0-9]+),', r2.content)
    if cid is None:
        cid = re.search(r'termId : \"([0-9]+)\",', r2.content)


    headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
                'Accept': '*/*' ,
                'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
                'Connection': 'keep-alive',
                'Content-Type': 'text/plain',
                'Cookie': 'STUDY_SESS=%s; '% session.cookies['STUDY_SESS'],
                'Host': web_host,
               }

    session.headers.update(headers)

    params =  {
                'callCount':1,
                'scriptSessionId':'${scriptSessionId}190',
                'httpSessionId':'e8890caec7fe435d944c0f318b932719',
                'c0-scriptName':'CourseBean',
                'c0-id': 0,
                'c0-methodName':'getLastLearnedMocTermDto',
                'c0-param0':'number:' + cid.group(1),
                'batchId':434820, #arbitrarily
                }

    getcourse_url = 'http://www.icourse163.org/dwr/call/plaincall/CourseBean.getLastLearnedMocTermDto.dwr'

    r3 = session.post(getcourse_url,data = params)

    print ('Parsing...', end="")

    syllabus = parse_syllabus_icourse163(session, r3.content)

    if syllabus:
        print ('Done.')
    else:
        print ('Failed. No course content on the page.')
        sys.exit(0)

    print ('Save files to %s' % path)

    download_syllabus_icourse163(session, syllabus, path)
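The login flow above is Python 2 code (`md5.new`, byte-string response bodies). On Python 3 the password-hashing step would go through hashlib instead; a sketch, assuming `user_pswd` is a str:

import hashlib

encryptedpswd = hashlib.md5(user_pswd.encode('utf-8')).hexdigest()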
Example #41
    def run(self, argv):
        """Make API self.classes

        Parse Swagger JSON files and make API classes.

        Uses:
        copyright_notice.bit
        proper_object_def.proto
        proper_object_method_def.proto

        """
        args = parse_args(argv)
        methods_to_move = ['get', 'gets']
        asterisk_class = None
        if ((args['dir'] is None or args['dir'] == '')
                and (args['resource'] is None or args['resource'] == '')) \
                or args['lang'] is None or args['lang'] == '':
            print "Usage: ./generate_library --lang=language ", \
                  "[--dir=/path/to/resources/ | ", \
                  "--resource=", \
                  "http://localhost:8088/stasis/api-docs/resources.json] "
            return 1

        self.lang_tools = __import__(args['lang'])

        def remove_moved(method):
            """Remove get* methods from this class and add to Asterisk"""
            if method.method_name in methods_to_move:
                # Add these to the Asterisk class instead
                asterisk_class.methods.append(method)
                return False
            else:
                return True

        for class_ in self.classes:
            if class_.class_name == "Asterisk":
                asterisk_class = class_
            class_.methods[:] = [m for m in class_.methods if remove_moved(m)]

        template_copyright = get_file_content(
            '%s/templates/copyright.proto' % (args['lang'])
        ) + '\n'

        if args['dir']:
            self.get_resources_from_dir(args['dir'], args['lang'])
        elif args['resource']:
            self.get_resources_from_url(args['resource'], args['lang'])

        if len(self.classes) == 0:
            print "No resources found. Are you using Asterisk 12 or later?"
            return 1

        self.classes = sorted(self.classes, cmp=sort_asterisk_first)

        for class_ in self.classes:
            method_texts = []
            print "Generating class %s" % (class_.class_name)
            class_def = class_.construct_file_contents()

            for method in class_.methods:
                if method.method_name in methods_to_move:
                    if class_.class_name != 'Asterisk':
                        continue
                    else:
                        # Rename from get/gets to get_channel, get_channels
                        method.method_name = re.sub('(s*)$', r'_%s\1'
                                                    % (method.file_name),
                                                    method.method_name)
                        method.file_name = 'asterisk'

                print "  method %s.%s" \
                    % (class_.class_name, method.method_name)
                filebit = method.construct_file_contents()
                method_texts.append(filebit)

            methods_blob = '\n\n'.join(method_texts)
            if methods_blob != '':
                # Handle different number of newlines if we have no methods
                # to add.
                methods_blob = '\n' + methods_blob

            class_def = re.sub('\{CLASS_METHODS\}', methods_blob, class_def)
            file_contents = '\n\n'.join([template_copyright, class_def])
            file_contents = self.lang_tools.wrap(file_contents)
            write_file('%s/lib/%s.%s' % (args['lang'], class_.file_name,
                       self.lang_tools.FILE_EXTENSION), file_contents)

        license_content = get_file_content('LICENSE')
        write_file('%s/lib/LICENSE' % args['lang'], license_content)
Example #42
        np.array(response_list_2d[img3_idx]).reshape(1, -1))

    print("Input Image Num: ", index_list[img1_idx], " Output image nums: ",
          index_list[closest_to_img1[1][0,
                                        0]], index_list[closest_to_img1[1][0,
                                                                           1]],
          index_list[closest_to_img1[1][0,
                                        2]], index_list[closest_to_img1[1][0,
                                                                           3]])
    print("Input Image Num: ", index_list[img2_idx], " Output image nums: ",
          index_list[closest_to_img2[1][0,
                                        0]], index_list[closest_to_img2[1][0,
                                                                           1]],
          index_list[closest_to_img2[1][0,
                                        2]], index_list[closest_to_img2[1][0,
                                                                           3]])
    print("Input Image Num: ", index_list[img3_idx], " Output image nums: ",
          index_list[closest_to_img3[1][0,
                                        0]], index_list[closest_to_img3[1][0,
                                                                           1]],
          index_list[closest_to_img3[1][0,
                                        2]], index_list[closest_to_img3[1][0,
                                                                           3]])

    # print(closest_to_img1)
    # VOCDataset.get_class_name(closest_to_img1[0])


if __name__ == "__main__":
    args, device = utils.parse_args()
    main()
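The `closest_to_imgN[1][0, i]` indexing above matches the `(distances, indices)` pair returned by a k-nearest-neighbour query over the response vectors. A sketch of how such a query could be produced with scikit-learn; only the variable names mirror the snippet, the setup is assumed:

import numpy as np
from sklearn.neighbors import NearestNeighbors

# one response vector per image
features = np.asarray(response_list_2d)
nn = NearestNeighbors(n_neighbors=4).fit(features)

# kneighbors() returns (distances, indices); indices[0, i] is the i-th match
closest_to_img1 = nn.kneighbors(features[img1_idx].reshape(1, -1))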
Example #43
        # Restore confusion matrix
        metrics['confusion matrix'] = cm

    @staticmethod
    def _get_train_summary_op(graph, tag_prefix='train/'):
        loss = graph.get_tensor_by_name(LOSS_TENSOR)
        loss_summary = tf.summary.scalar(tag_prefix + 'loss', loss)
        return loss_summary


if __name__ == '__main__':
    # Suppress most console output
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    mode, args = parse_args(sys.argv[1:])

    # Create full paths
    meta_file = os.path.join(args.model_dir, args.meta_name)
    ckpt = os.path.join(args.model_dir, args.ckpt_name)

    # Create runner
    if mode == 'train':
        augmentation_kwargs = dict(
            max_bbox_jitter=args.max_bbox_jitter,
            max_rotation=args.max_rotation,
            max_shear=args.max_shear,
            max_pixel_shift=args.max_pixel_shift,
            max_pixel_scale_change=args.max_pixel_scale_change)
    else:
        augmentation_kwargs = {}
Example #44
    print(kwargs)
    # Apply the RNN to the inputs
    h = rnn.apply(low_memory=True, **kwargs)

    fork.initialize()

    rnn.weights_init = initialization.Orthogonal()
    rnn.biases_init = initialization.Constant(0)
    rnn.initialize()

    f_h = theano.function([x], h)
    return f_pre_rnn, f_h

if __name__ == "__main__":
    args = parse_args()

    dataset = args.dataset

    mini_batch_size = 2
    time_length = 10

    # Prepare data
    train_stream, valid_stream, vocab_size = get_minibatch_char(
        dataset, mini_batch_size, time_length, args.tot_num_char)

    f_pre_rnn, f_h = build_fork_lookup(vocab_size, time_length, args)
    data = next(train_stream.get_epoch_iterator())[1]
    print(data)

    pre_rnn = f_pre_rnn(data)
Example #45
def conv_temporal_spatial_long_time():
    ############################################################################
    # TEST 2: CONVERGENCE TEST FOR ADVECTION-DIFFUSION SOLVER
    ############################################################################
    mpi_num_procs, omp_num_threads = utils.parse_args()
    # prog          = 'advdiff-ss'
    # prog          = 'advdiff-ss-tv'
    prog          = 'advdiff-ss-tv-extrap'

    dt            = 0.0628
    vtk_save_rate = 0
    mrg_type      = 3
    np            = mpi_num_procs
    num_pnts      = 8**(math.floor(math.log(np,8)+1))
    nt            = omp_num_threads

    # UNIFORM
    # dp_list = [5    , 6    , 7    ]
    # cq_list = [3    , 3    , 3    ]
    # ci_list = [True , True , True ]
    # uf_list = [2    , 2    , 2    ]
    # dt_list = [dt   , dt/2 , dt/4 ]
    # tn_list = [100  , 200  , 400  ]
    # num_steps = len(dp_list)
    # tl_list = [1e-30         for cnt in range(0,num_steps)]

    # ADAPTIVE
    # LOW ORDER
    tl_list = [1e-02, 1e-03, 1e-04 ]
    dp_list = [15   , 15   , 15    ]
    cq_list = [3    , 3    , 3     ]
    ci_list = [True , True , True  ]
    uf_list = [2    , 2    , 2     ]
    dt_list = [dt   , dt/2 , dt/3  ]
    tn_list = [100  , 200  , 300   ]

    # HIGH ORDER
    # tl_list = [1e-03, 1e-04, 1e-05, 1e-6   ]
    # dp_list = [15   , 15   , 15   , 15     ]
    # cq_list = [14   , 14   , 14   , 14     ]
    # ci_list = [True , True , True , True   ]
    # uf_list = [2    , 2    , 2    , 2      ]
    # dt_list = [dt/4 , dt/8 , dt/16, dt/32 ]
    # tn_list = [400  , 800  , 1600 , 3200  ]


    num_steps = len(dp_list)
    pn_list = [num_pnts      for cnt in range(0,num_steps)]
    np_list = [np            for cnt in range(0,num_steps)]
    nt_list = [nt            for cnt in range(0,num_steps)]
    mg_list = [mrg_type      for cnt in range(0,num_steps)]
    vs_list = [vtk_save_rate for cnt in range(0,num_steps)]
    tt_list = [11            for cnt in range(0,num_steps)]

    cmd_args = OrderedDict()
    cmd_args = utils.generate_commands(
        prog,
        pn_list,
        tl_list,
        dp_list,
        cq_list,
        ci_list,
        uf_list,
        np_list,
        nt_list,
        dt_list,
        tn_list,
        vs_list,
        mg_list,
        tt_list)
    utils.execute_commands(cmd_args, 'temporal-spatial-long-time')
Example #46
        return Link_Pred_Tasker(args, dataset)
    elif args.task == 'edge_cls':
        return Edge_Cls_Tasker(args, dataset)
    elif args.task == 'node_cls':
        return Node_Cls_Tasker(args, dataset)

def build_classifier(args, tasker):
    if 'node_cls' == args.task:
        mult = 1
    else:
        mult = 2
    return Classifier(args, in_features=args.gcn_out_feats * mult, out_features=tasker.num_classes).to(args.device)

if __name__ == '__main__':
    parser = u.create_parser()
    args = u.parse_args(parser)

    args.device = 'cuda'

    dataset = build_dataset(args)
    tasker = build_tasker(args, dataset)
    splitter = splitter(args, tasker)
    gcn = EGNNC(args.feats_per_node,
                args.hidden_feats,
                args.gcn_out_feats,
                args.num_hist_steps,
                tasker.data.num_nodes).to(args.device)
    classifier = build_classifier(args, tasker)
    loss = ce.Cross_Entropy(args, dataset).to('cuda')
    trainer = Trainer(args,
                      splitter=splitter,
Example #47
                            print >>f_index, '\t"%s" : ' % word
                            print >>f_index, json.dumps(index, sort_keys=True, indent=2,
                                                        ensure_ascii=False, encoding='utf-8'), ','
                        else:
                            print >>f_index, u'%s\t%s' % (word, u' '.join( u'%s,%d,%d' % (k,v[0],v[1]) if k in ["ids","lens"]
                                                                        else u'%s,%s' % (k,v)  for k,v in index.items()))
            if use_json: print >>f_index, '\n\n"" : [] }\n'

    with codecs.open('error.txt', 'a+', encoding='utf-8') as f_err:
        print >>f_err, "\n\n%d, %.3f sec" % (quantity, time.time() - start_time)
    return


import utils
import fib_archive
import s9_archive


if __name__ == '__main__':

    args = utils.parse_args()

    if args.fib:
        archiver = fib_archive.FibonacciArchiver(args.fib)
    elif args.s9:
        archiver = s9_archive.Simple9Archiver()
    else:
        # guard against neither option being given (archiver would be undefined)
        raise SystemExit('Specify either a Fibonacci or a Simple-9 archiver.')

    reshape(args.dat_name, args.ndx_name, args.bin_name, args.len_name,
            archiver=archiver, use_hashes=args.use_hashes)

Example #48
    def decode_instr(self, args):
        self.src_op, self.dest_op = utils.parse_args(args)
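Here `utils.parse_args` evidently splits an instruction's operand field into its two operands. A hypothetical helper consistent with that call (the operand order is an assumption):

def parse_args(args):
    # split an operand field like 'R1, R2' into (src, dest)
    src, dest = (op.strip() for op in args.split(','))
    return src, dest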
Example #49
def generate_command_args(max_depth, mpi_num_procs, omp_num_threads):

    EXEC = os.path.join(utils.TBSLAS_EXAMPLES_BIN_DIR, "advdiff-ss")
    # generate a dictionary of commands
    cmd_args = OrderedDict()
    cmd_id = 1
    ARGS    = ['-N'   , '2048', \
               '-tol' , '1e-5', \
               '-d'   , str(max_depth), \
               '-dt'  , '0.125', \
               '-tn'  , '5', \
               '-test', str(4), \
               '-omp' , str(omp_num_threads), \
               ]
    cmd_args[cmd_id] = utils.determine_command_prefix(mpi_num_procs) + [EXEC] + ARGS
    return cmd_args
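`utils.determine_command_prefix` evidently prepends the MPI launcher when more than one process is requested; a plausible sketch (the mpirun flags are an assumption, not taken from tbslas):

def determine_command_prefix(mpi_num_procs):
    # prefix for launching under MPI; empty for a serial run
    if mpi_num_procs > 1:
        return ['mpirun', '-np', str(mpi_num_procs)]
    return []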

################################################################################
# MAIN
################################################################################
if __name__ == '__main__':
    mpi_num_procs, omp_num_threads = utils.parse_args()
    max_depth = 7

    ############################################################################
    # TEST 1: TEMPORAL CONVERGENCE
    ############################################################################
    cmd_args = generate_command_args(max_depth, mpi_num_procs, omp_num_threads)
    utils.execute_commands(cmd_args, 'vis-advdiff')
Example #50
mw = MainWindow(width, height, Screen)
ep.mw = mw

        
# GTK mumbo-jumbo to show the widget in a window and quit when it's closed
def run():
    mw.run()

if __name__ == "__main__":
    usage = "pythone tileset_editor.py [-h] [--reexport] [--project <path>] [--out <path>]"
    if len(sys.argv) > 1:
        description = {"-h": {"arglen": 0, "arg": None, "present": False},
                       "--reexport": {"arglen": 0, "arg": None, "present": False},
                       "--project": {"arglen": 1, "arg": None, "present": False},
                       "--out": {"arglen": 1, "arg": None, "present": False}}
        if utils.parse_args(sys.argv, description):
            if description["-h"]["present"]:
                print usage
                exit(0)
            elif description["--reexport"]["present"]:
                if description["--project"]["present"] and description["--out"]["present"]:
                    project_path = description["--project"]["arg"]
                    out_path = description["--out"]["arg"]
                    print "Loading project from:", project_path
                    project.load(project_path)
                    print "Exporting project to:", out_path
                    state.export(out_path)
                    exit(0)
    run()
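The `description` table drives a small hand-rolled parser: `utils.parse_args` is expected to walk `sys.argv`, mark each known flag as present, and capture the token that follows a one-argument flag. A sketch of that contract, assumed rather than taken from the project:

def parse_args(argv, description):
    # returns True on success, False on an unknown flag or missing value
    i = 1
    while i < len(argv):
        entry = description.get(argv[i])
        if entry is None:
            return False
        entry['present'] = True
        if entry['arglen']:
            if i + entry['arglen'] >= len(argv):
                return False
            entry['arg'] = argv[i + 1]
        i += 1 + entry['arglen']
    return True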
Example #51
    def test_parse_args__valid(self):
        """with valid args."""
        dummy_request = dummy({'price': 1000, 'downpayment': 10})
        result = utils.parse_args(dummy_request, params)
        self.assertTrue('results' in result)
        # assertEqual, not assertTrue: assertTrue's second argument is a message
        self.assertEqual(result['results']['price'], 1000)
Example #52
def test_valid_parse_args_defaults(default_address_fixture, default_port_fixture, address_fixture, port_fixture):
    """Compare passed and returned address and port with default address and port."""
    parser = parse_args((*address_fixture, *port_fixture))
    assert parser.address != default_address_fixture and parser.port != default_port_fixture
Example #53
File: glad.py Project: kaniblu/vhda
    parser.add_str("data-format", default="json",
                   choices=("woz", "json", "dstc"),
                   help="Data format of the data to be loaded.")
    parser.add_pth("glad-dir", is_dir=True, must_exist=True,
                   default=(pathlib.Path(__file__).parent
                            .joinpath("../dst/glad").absolute()),
                   help="Directory to an existing glad codebase.")
    parser.add_pth("save-dir", is_dir=True, default="out-glad",
                   help="Directory for saving output files.")
    parser.add_int("max-epochs", min_bound=1, default=50,
                   help="Maximum epochs to train models.")
    parser.add_int("batch-size", min_bound=1, default=50,
                   help="Mini-batch size during stochastic gd.")
    parser.add_flt("emb-dropout", default=0.2,
                   help="Embedding dropout.")
    parser.add_flt("local-dropout", default=0.2,
                   help="Local dropout.")
    parser.add_flt("global-dropout", default=0.2,
                   help="Global dropout.")
    parser.add_str("early-stop-criterion", default="joint_goal",
                   choices=("joint_goal", "turn_inform",
                            "turn_request", "hmean"))
    parser.add_int("seed",
                   help="Random seed.")
    parser.add_int("gpu", help="Index of specific GPU device to use.")
    return parser


if __name__ == "__main__":
    main(utils.parse_args(create_parser()))
Example #54
                                      trn_dataset.tokenizer,
                                      clean=False)
        del model

    # calc training stats
    fold_best_metric_mean = np.mean(fold_best_metrics)
    fold_best_metric_std = np.std(fold_best_metrics)
    fold_stats = f'{EXP_ID} : {fold_best_metric_mean:.4f} +- {fold_best_metric_std:.4f}'
    sel_log(fold_stats, logger)
    send_line_notification(fold_stats)

    fold_best_metrics_raws_mean = np.mean(fold_best_metrics_raws, axis=0)
    fold_raw_stats = ''
    for metric_stats_raw in fold_best_metrics_raws_mean:
        fold_raw_stats += f'{float(metric_stats_raw):.4f},'
    sel_log(fold_raw_stats, logger)
    send_line_notification(fold_raw_stats)

    sel_log('now saving best checkpoints...', logger)


if __name__ == '__main__':
    args = parse_args(None)
    log_file = f'{EXP_ID}.log'
    logger = getLogger(__name__)
    logger = logInit(logger, f'{MNT_DIR}/logs/', log_file)
    sel_log(f'args: {sorted(vars(args).items())}', logger)

    # send_line_notification(f' ------------- start {EXP_ID} ------------- ')
    main(args, logger)
Example #55
        self.assert_response_time(res, 2)
        self.assertEqual(res.status_code, 200)

    def test_get_by_valid_id(self):
        id = f"{osu_id}-{term_id}"
        res = utils.get_by_id(id)

        self.assertIn('data', res.json())
        self.assertIsInstance(res.json()['data'], dict)
        self.assert_response_time(res, 3)
        self.assertEqual(res.status_code, 200)

    def test_get_by_invalid_id(self):
        id = 'invalid_id'
        res = utils.get_by_id(id)

        self.assert_response_time(res, 2)
        self.assertEqual(res.status_code, 404)


if __name__ == '__main__':
    args, argv = utils.parse_args()
    config_data = utils.load_config(args.config)
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.WARNING)

    osu_id = config_data['staff_fee_privilege_osu_id']
    term_id = config_data['staff_fee_privilege_term_id']

    # hand the remaining argv back to unittest.main()
    sys.argv[:] = argv
    unittest.main()
Example #56
import os
import wandb
import torch
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from models import dispatch_model
from utils import parse_args, save_model
from dataset import get_train_dataloader, get_test_dataloader
from training import dispatch_optimizer, get_lr, dispatch_lr_scheduler
from metrics import compute_accuracy

save_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'models')
embeddings = []

if __name__ == '__main__':
    args = parse_args()
    print(args)

    use_cuda = not args.use_cpu and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    bs = args.train_batch_size
    best_acc = 0

    train_dataloader = get_train_dataloader(args.data_dir,
                                            args.train_batch_size,
                                            embedding=args.embedding)
    test_dataloader = get_test_dataloader(args.data_dir,
                                          args.train_batch_size,
                                          embedding=args.embedding)

    # metrics_train_dataloader = None # et_train_dataloader(args.data_dir, eval_batch, dataset_version, shuffle=False, use_transforms=False)
Example #57
def main():
    args = parse_args()

    if args.username is None:
        print('No username specified.')
        sys.exit(1)
    if args.password is None:
        print('No password specified.')
        sys.exit(1)

    user_email = args.username
    user_pswd = args.password
    course_link = args.course_url[0]
    path = args.path
    overwrite = args.overwrite

    regex = r'(?:https?://)(?P<site>[^/]+)/(?P<baseurl>[^/]+)/(?P<coursename>[^/]+)/?'
    m = re.match(regex, args.course_url[0])
    if m is None:
        print('The URL provided is not valid for icourse163.')
        sys.exit(0)

    # Python 2 'md5' module; use hashlib.md5 on Python 3
    md = md5.new()
    md.update(user_pswd)
    encryptedpswd = md.hexdigest()

    if m.group('site') in ['www.icourse163.org']:
        login_data = {
            'product': 'imooc',
            'url': 'http://www.icourse163.org/mooc.htm?#/index',
            'savelogin': 1,
            'domains': 'icourse163.org',
            'type': 0,
            'append': 1,
            'username': user_email,
            'password': encryptedpswd
        }
        login_success_flag = '正在登录,请稍等...'  # "Logging in, please wait..."
        web_host = 'www.icourse163.org'
        regex_loc = r'window.location.replace\(\"(http:\/\/reg\.icourse163\.org\/next\.jsp.+)\"\)'
    elif m.group('site') in ['mooc.study.163.com']:
        login_data = {
            'product': 'study',
            'url': 'http://study.163.com?from=study',
            'savelogin': 1,
            'domains': '163.com',
            'type': 0,
            'append': 1,
            'username': user_email,
            'password': encryptedpswd
        }
        login_success_flag = '登录成功,正在跳转'  # "Login successful, redirecting"
        web_host = 'mooc.study.163.com'
        regex_loc = r'window.location.replace\(\"(http:\/\/study\.163\.com\?from=study)\"\)'
    else:
        print('The URL provided is not valid for icourse163.')
        sys.exit(0)
    path = os.path.join(path, clean_filename(m.group('coursename')))

    login_url = 'https://reg.163.com/logins.jsp'

    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
        'Connection': 'keep-alive',
    }

    session = requests.Session()
    session.headers.update(headers)
    r1 = session.post(login_url, data=login_data)

    success = re.search(login_success_flag, r1.content)
    if not success:
        print('Fail to login.')
        exit(2)
    else:
        print('Login done...')

    se = re.search(regex_loc, r1.content)

    r = session.get(
        se.group(1),
        allow_redirects=True,
        cookies={'NTES_PASSPORT': session.cookies['NTES_PASSPORT']})

    # get course id, it's in cid.group(1)
    r2 = session.get(course_link)
    cid = re.search(r'window\.termDto = {             id:([0-9]+),',
                    r2.content)
    if cid is None:
        cid = re.search(r'termId : \"([0-9]+)\",', r2.content)

    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
        'Connection': 'keep-alive',
        'Content-Type': 'text/plain',
        'Cookie': 'STUDY_SESS=%s; ' % session.cookies['STUDY_SESS'],
        'Host': web_host,
    }

    session.headers.update(headers)

    params = {
        'callCount': 1,
        'scriptSessionId': '${scriptSessionId}190',
        'httpSessionId': 'e8890caec7fe435d944c0f318b932719',
        'c0-scriptName': 'CourseBean',
        'c0-id': 0,
        'c0-methodName': 'getLastLearnedMocTermDto',
        'c0-param0': 'number:' + cid.group(1),
        'batchId': 434820,  #arbitrarily
    }

    getcourse_url = 'http://www.icourse163.org/dwr/call/plaincall/CourseBean.getLastLearnedMocTermDto.dwr'

    r3 = session.post(getcourse_url, data=params)

    print('Parsing...', end="")

    syllabus = parse_syllabus_icourse163(session, r3.content)

    if syllabus:
        print('Done.')
    else:
        print('Failed. No course content on the page.')
        sys.exit(0)

    print('Save files to %s' % path)

    download_syllabus_icourse163(session, syllabus, path)
Example #58
def conv_spatial():
    ############################################################################
    # SPATIAL CONVERGENCE TEST FOR ADVECTION
    ############################################################################
    mpi_num_procs, omp_num_threads = utils.parse_args()
    prog = 'advtv'
    num_steps = 8

    ##############################
    # TREE TOLERANCE
    ##############################
    tl_fact = 0.1
    tl_init = 1e-1
    tl_list = [
        tl_init * math.pow(tl_fact, float(cnt)) for cnt in range(0, num_steps)
    ]

    ##############################
    # TIME RESOLUTION
    ##############################
    dt_fact = 1
    dt_init = 1e-3
    dt_list = [
        dt_init * math.pow(dt_fact, float(cnt)) for cnt in range(0, num_steps)
    ]

    T_END = 1.0
    tn_fact = 1.0 / dt_fact
    tn_init = 1  #T_END/dt_init
    tn_list = [
        tn_init * math.pow(tn_fact, float(cnt)) for cnt in range(0, num_steps)
    ]

    ##############################
    # TREE DEPTH/POINTS
    ##############################
    dp_list = [15 for cnt in range(0, num_steps)]

    num_pnts = 8**(math.floor(math.log(mpi_num_procs, 8) + 1))
    pn_list = [num_pnts for cnt in range(0, num_steps)]

    ##############################
    # PARALLEL
    ##############################
    np_list = [mpi_num_procs for cnt in range(0, num_steps)]

    nt = omp_num_threads
    nt_list = [nt for cnt in range(0, num_steps)]

    mrg_type = 3
    mg_list = [mrg_type for cnt in range(0, num_steps)]

    ##############################
    # CHEBYSHEV/CUBIC INTERPOLATION
    ##############################
    cq_list = [14 for cnt in range(0, num_steps)]
    ci_list = [True for cnt in range(0, num_steps)]
    uf_list = [4 for cnt in range(0, num_steps)]

    ##############################
    # VISUALIZATION
    ##############################
    vtk_save_rate = 0
    vs_list = [vtk_save_rate for cnt in range(0, num_steps)]

    cmd_args = OrderedDict()
    cmd_args = utils.generate_commands(prog, pn_list, tl_list, dp_list,
                                       cq_list, ci_list, uf_list, np_list,
                                       nt_list, dt_list, tn_list, vs_list,
                                       mg_list)
    utils.execute_commands(cmd_args, 'spatial')
Example #59
def main():
    global args
    args = parse_args()
    train_net(args)
Example #60
#!/usr/bin/env python
import sys
sys.path.append('..')

from piv.ufojob import UfoJob
from utils import parse_args

in_path, out_file = parse_args()

#number   = 1
in_path  = in_path or './inputs'
out_file = out_file or './output/bilateral.tif'

print 'Input image is ', in_path
print 'Output path is ', out_file

# note: intentionally shadows the imported UfoJob with a specialized subclass
class UfoJob(UfoJob):
    def setup_tasks(self):
        self.add_task('read', path=in_path)
        self.add_task('write', filename=out_file)
        self.add_task('bilateral')
    def setup_graph(self):
        b = self.branch('read', 'bilateral', 'write')
        self.graph.connect_branch(b)

uj = UfoJob()
uj.setup_tasks()
uj.setup_graph()

uj.run_t()