Example #1
def document():
    """Run the commands selected by the user with Documentor."""
    project, projectname, output, email, serve, deploy = arguments.parse()
    if deploy:
        output = os.path.abspath(output)
        _deploy(output, deploy)
        return
    elif serve:
        _serve(output)
        return

    if project is None or output is None:
        raise Exception("Invalid arguments, empty args are not allowed")

    if os.path.exists(project) and os.path.exists(output):
        project = os.path.abspath(project)
        project_name = _get_project_name(projectname, project)
        output = os.path.abspath(output)
        process = subprocess.Popen(["nikola", "init", project_name],
                                   cwd=output)
        process.wait()
        output_folder = os.path.join(output, project_name)
        if os.path.exists(output_folder):
            new_conf.create_new_configuration(output_folder, project_name,
                                              email)
            worker = analyzer.Analyzer(project, output_folder, project_name)
            worker.scan()
            process = subprocess.Popen(["nikola", "build"], cwd=output_folder)
            process.wait()
        else:
            print('Something went wrong and the output folder could not be created')
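
Every example on this page calls a project-local arguments module rather than argparse directly. For Example #1 the call site pins down the shape: parse() returns a six-tuple of (project, projectname, output, email, serve, deploy). A minimal sketch of such a module, assuming argparse and illustrative flag names (the real Documentor parser may differ):

# arguments.py -- hypothetical sketch, not the actual Documentor source
import argparse

def parse():
    parser = argparse.ArgumentParser(prog='documentor')
    parser.add_argument('-p', '--project', help='path to the project to document')
    parser.add_argument('-n', '--projectname', help='name for the generated site')
    parser.add_argument('-o', '--output', help='folder where the site is created')
    parser.add_argument('-e', '--email', help='contact e-mail written to the site config')
    parser.add_argument('-s', '--serve', action='store_true', help='serve an already built site')
    parser.add_argument('-d', '--deploy', help='deployment target for a built site')
    args = parser.parse_args()
    return (args.project, args.projectname, args.output,
            args.email, args.serve, args.deploy)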
Example #2
File: train.py  Project: arfu2016/decaNLP
def main():
    args = arguments.parse()
    if args is None:
        return
    set_seed(args)
    logger = initialize_logger(args)
    logger.info(f'Arguments:\n{pformat(vars(args))}')
    # pformat(vars(args)) formats the argument namespace as a string

    field, save_dict = None, None
    # tuple unpacking
    if args.load is not None:
        logger.info(f'Loading field from {os.path.join(args.save, args.load)}')
        save_dict = torch.load(os.path.join(args.save, args.load))
        field = save_dict['field']
        # field is the value in the 'field' key of the data
    field, train_sets, val_sets = prepare_data(args, field, logger)

    run_args = (field, train_sets, val_sets, save_dict)
    if len(args.gpus) > 1:
        logger.info(f'Multiprocessing')
        # multi-GPU path
        mp = Multiprocess(run, args)
        mp.run(run_args)
    else:
        logger.info(f'Processing')
        # f-strings require Python 3.6+
        run(args, run_args, world_size=args.world_size)
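
The `if args is None: return` guard implies that decaNLP's arguments.parse() signals invalid input by returning None instead of raising. A hedged sketch of that convention, with illustrative option names (the real decaNLP parser defines many more):

import argparse

def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', help='root directory of the preprocessed tasks')
    parser.add_argument('--save', default='save', help='checkpoint directory')
    parser.add_argument('--load', help='checkpoint to resume from')
    parser.add_argument('--gpus', nargs='+', type=int, default=[0])
    args = parser.parse_args()
    if args.data is None:
        print('--data is required')
        return None  # caller checks for None and exits quietly
    return args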
Example #4
def execute_translator():
    """Run the commands selected by the user with CodeTranslator."""
    project_to_po, project_to_translate, output = arguments.parse()
    output = os.path.abspath(output)
    if project_to_po:
        project_to_po = os.path.abspath(project_to_po)
        filename = code_parser.create_po(project_to_po)
        print("PO generated at: %s" % filename)

    if project_to_translate:
        project_to_translate = os.path.abspath(project_to_translate)
        code_parser.translate_source(project_to_translate, output)
        print("TRANSLATION GENERATED AT: %s" % output)
Example #5
File: driver.py  Project: strangemonad/cplr
def main(args, compiler=None):
   """The entry point for CPLR's command line interface.
   
   It may be useful to call main programmatically if you are writing a simple
   wrapper around CPLR's command line interface. It is discouraged to parse
   the output written to standard out to try to infer the source of the
   error. Instead create a Compiler object directly and handle the exceptions 
   that may be raised.

   Parameters:
      args - A list of strings that control the compilation.
   
   Results:
      One of the following integer values:
      0 - The compilation succeeded.
      2 - The arguments were invalid.
      3 - The sources and output files determined from the arguments couldn't 
          be opened.
      4 - The compilation failed due to an error in the source content.
   
   Side effects:
      If the function returns successfully output may be written as directed by 
      the arguments.

      If the function returns an error code information about the error may be 
      printed to stderr.

      The function may never return and instead may raise any number of 
      exceptions if abnormal execution is encountered. 
      All raised exceptions are bugs. Note that cases such as invalid arguments 
      or failure to open a source file are expected to occur and will be 
      reported by the result rather than raising an error.

   """

   result = 0
   # Unified try:except:finally is a 2.5 feature
   try:
      try:
         sourceFiles = []

         (env, sources, output) = arguments.parse(args)

         sourceFiles = [open(source, "r") for source in sources]   
      except IOError, e:
         result = 3
         print >> sys.stderr, "'%s': %s" % (e.filename, e.strerror)

      else:
Example #6
def main():
  # make sure the user has the correct versions of required software installed
  util.ensure_required_software()

  args = arguments.parse()
  conf = config.load_config()

  # call the subcommand the user specified with the config and arguments
  try:
    args.command(conf, args)
  except Exception as e:
    # re-raise the full exception if debug is enabled
    if args.debug:
      raise

    # if we encounter an exception, print it and exit with an error
    print(color.red('[error]'), e, file=sys.stderr)
    sys.exit(1)
Example #8
File: train.py  Project: AhlamMD/decaNLP
def main():
    args = arguments.parse()
    if args is None:
        return
    set_seed(args)
    logger = initialize_logger(args)
    logger.info(f'Arguments:\n{pformat(vars(args))}')

    field, save_dict = None, None
    if args.load is not None:
        logger.info(f'Loading field from {os.path.join(args.save, args.load)}')
        save_dict = torch.load(os.path.join(args.save, args.load))
        field = save_dict['field']
    field, train_sets, val_sets = prepare_data(args, field, logger)

    run_args = (field, train_sets, val_sets, save_dict)
    if len(args.gpus) > 1:
        logger.info(f'Multiprocessing')
        mp = Multiprocess(run, args)
        mp.run(run_args)
    else:
        logger.info(f'Processing')
        run(args, run_args, world_size=args.world_size)
Example #9
def main():
    args = arguments.parse()
    if args is None:
        return
    set_seed(args)
    # seed numpy and torch
    logger = initialize_logger(args)
    logger.info(f'Arguments:\n{pformat(vars(args))}')
    # pformat(vars(args)) formats the argument namespace as a string
    # pformat is pprint's formatting function; it returns a string

    field, save_dict = None, None
    # tuple unpacking
    if args.load is not None:
        logger.info(f'Loading field from {os.path.join(args.save, args.load)}')
        save_dict = torch.load(os.path.join(args.save, args.load))
        field = save_dict['field']
        # field is the value in the 'field' key of the data

        logger.info(field)

    # field is None
    prepare_data(args, field, logger)
Example #10
def main():
    app_path = files.get_app_path()
    if not os.path.exists(app_path):
        os.mkdir(app_path)
    if not os.path.isfile(files.get_config_path()):
        open(files.get_config_path(), 'w').close()  # create an empty config file
    if not os.path.isfile(files.get_accounts_path()):
        open(files.get_accounts_path(), 'w').close()  # create an empty accounts file

    aws_path = files.get_aws_path()
    if not os.path.exists(aws_path):
        os.mkdir(aws_path)

    args = arguments.parse(sys.argv[1:])
    logging.basicConfig(level=logging.getLevelName(args.loglevel))
    logger = logging.getLogger('logsmith')
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    log_formatter = logging.Formatter(
        "%(asctime)12s [%(levelname)s] %(threadName)-12.12s %(message)s")

    file_handler = logging.FileHandler(f'{app_path}/app.log', mode='w')
    file_handler.setFormatter(log_formatter)
    logger.addHandler(file_handler)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    logger.addHandler(console_handler)

    logging.info(f'config dir {app_path}')
    logging.info('start app')

    if arguments.use_cli(args):
        start_cli(args)
    else:
        start_gui()
Example #11
                req = send_data_to_master(data, req, buffer,
                                          verbose=verbose)


    except KeyboardInterrupt:
        print "Terminating program."

        if rank == 0:
            if args.save_data != 'no':
                closeSaveFile(saveFile)



if __name__ == '__main__':
    # Start here
    # parse the command line
    args = arguments.parse()

    if worldSize < 2:
        print('Script requires at least two MPI processes to run properly.')
        sys.exit(0)
    if args.verbose:
        print(args)
        if args.sendPV:
            print('Will send PVs')
        else:
            print('Will NOT send PVs')

    main(args, args.verbose)

Example #12
def main():
    args = arguments.parse()

    checkpoint = args.checkpoint if args.checkpoint else None

    model, params = get_network(args.arch,
                                args.n_attrs,
                                checkpoint=checkpoint,
                                base_frozen=args.freeze_base)

    criterion = get_criterion(loss_type=args.loss, args=args)

    optimizer = get_optimizer(params,
                              fc_lr=float(args.lr),
                              opt_type=args.optimizer_type,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)

    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=10,
                                          gamma=0.1,
                                          last_epoch=args.start_epoch - 1)
    if checkpoint:
        state = torch.load(checkpoint)
        model.load_state_dict(state["state_dict"])
        scheduler.load_state_dict(state['scheduler'])

    # Dataloader code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    val_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])

    logger.info("Setting up training data")
    train_loader = data.DataLoader(COCOAttributes(
        args.attributes,
        args.train_ann,
        train=True,
        split='train2014',
        transforms=train_transforms,
        dataset_root=args.dataset_root),
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.workers,
                                   pin_memory=True)

    logger.info("Setting up validation data")
    val_loader = data.DataLoader(COCOAttributes(
        args.attributes,
        args.val_ann,
        train=False,
        split='val2014',
        transforms=val_transforms,
        dataset_root=args.dataset_root),
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 pin_memory=True)

    best_prec1 = 0

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    logger.info("Beginning training...")

    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()

        # train for one epoch
        trainer.train(train_loader, model, criterion, optimizer, epoch,
                      args.print_freq)

        # evaluate on validation set
        # trainer.validate(val_loader, model, criterion, epoch, args.print_freq)
        prec1 = 0

        # remember best prec@1 and save checkpoint
        best_prec1 = max(prec1, best_prec1)
        trainer.save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'loss': args.loss,
                'optimizer': args.optimizer_type,
                'state_dict': model.state_dict(),
                'scheduler': scheduler.state_dict(),
                'batch_size': args.batch_size,
                'best_prec1': best_prec1,
            }, args.save_dir,
            '{0}_{1}_checkpoint.pth.tar'.format(args.arch, args.loss).lower())

    logger.info('Finished Training')

    logger.info('Running evaluation')
    evaluator = evaluation.Evaluator(model,
                                     val_loader,
                                     batch_size=args.batch_size,
                                     name="{0}_{1}".format(
                                         args.arch, args.loss))
    with torch.no_grad():
        evaluator.evaluate()
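
The checkpoint dict saved above carries more state than the resume path at the top of main() restores; the optimizer, for instance, is saved only by name and never reloaded. A resume sketch consistent with the keys written by trainer.save_checkpoint (assuming the same file layout):

import torch

def resume(checkpoint_path, model, scheduler):
    # mirrors the keys written by trainer.save_checkpoint above
    state = torch.load(checkpoint_path)
    model.load_state_dict(state['state_dict'])
    scheduler.load_state_dict(state['scheduler'])
    return state['epoch'], state['best_prec1']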
Example #13
def testTwoArguments(self):
    (env, sources, output) = arguments.parse(["1", "2"])
    self.assertEqual("2", output, 
                     "'output' should contain the last positional argument")
    self.assertEqual(["1"], sources, 
                    "'sources' should be a list of all source files")
Example #14
def main():
    """
    Main function for training.
    """
    args = arguments.parse()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    param_dict = dict()
    log, writer = None, None  # defined up front: both are checked at the end even when unset

    # Save model and meta-data. Always saves in a new sub-folder.
    if args.save_folder:
        timestamp = datetime.datetime.now().isoformat().replace(':', '-')
        save_folder = '{}/exp{}_{}/'.format(args.save_folder, timestamp,
                                            args.out)
        os.mkdir(save_folder)
        meta_file = os.path.join(save_folder, 'metadata.pkl')
        encoder_file = os.path.join(save_folder, 'encoder.pt')
        decoder_file = os.path.join(save_folder, 'decoder.pt')

        log_file = os.path.join(save_folder, 'log.txt')
        log = open(log_file, 'w')

        pickle.dump({'args': args}, open(meta_file, "wb"))

        param_dict.update({
            'save_folder': save_folder,
            'encoder_file': encoder_file,
            'decoder_file': decoder_file
        })
    else:
        print("WARNING: No save_folder provided!" +
              "Testing (within this script) will throw an error.")

    def transform(datum):
        if args.dataset == 'deap':
            data_t = transform_deap_data_raw(datum)
            label_t = transform_deap_label_video(datum)
        elif args.dataset == 'dreamer':
            data_t = transform_dreamer_data_raw(datum)
            label_t = transform_dreamer_label_video(datum)
        else:
            raise ValueError('unsupported dataset: {}'.format(args.dataset))

        return data_t, label_t

    loaders = load_lmdb_dataset(lmdb_root=args.data_path,
                                batch_size=args.batch_size, transform=transform,
                                shuffle=True)

    encoder, decoder = _construct_model(args)
    rel_rec, rel_send = _construct_auxiliary_parameters(args)
    rel_rec, rel_send = _make_cuda((encoder, decoder), (rel_rec, rel_send),
                                   args)
    optimizer, scheduler = _construct_optimizer((encoder, decoder), args)

    param_dict.update({
        'loaders': loaders,
        'encoder': encoder,
        'decoder': decoder,
        'optimizer': optimizer,
        'scheduler': scheduler,
        'rel_rec': rel_rec,
        'rel_send': rel_send
    })

    if args.tensorboard:
        import socket
        log_dir = os.path.join(
            'runs', timestamp + '_' + args.out + '_' + socket.gethostname())
        writer = SummaryWriter(logdir=log_dir)
        param_dict['writer'] = writer

    # Train model
    best_val_loss = np.inf
    best_epoch = 0
    for epoch in range(args.epochs):
        val_loss = train(epoch, best_val_loss, args, param_dict)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_epoch = epoch

    with redirect_stdout(open(args.out, 'a')):
        print("Optimization Finished!")
        print("Best Epoch: {:04d}".format(best_epoch + 1))
        if args.save_folder:
            print("Best Epoch: {:04d}".format(best_epoch + 1), file=log)
            log.flush()

    # Test model
    metric_dict = test(args, param_dict)
    if log is not None:
        print(save_folder)
        log.close()

    if writer is not None:
        if args.deterministic_sampling:
            gsample = 'DET'
        elif args.hard:
            gsample = 'STO'
        else:
            gsample = 'CON'

        hparam_dict = {
            'lr': args.lr,
            'sampling': gsample,
            'enc_hid': args.encoder_hidden,
            'dec_hid': args.decoder_hidden,
        }
        writer.add_text('parameters', str(hparam_dict), 0)
        writer.add_text('metrics', str(metric_dict), 0)
        writer.close()
Example #15

from arguments import parse
from agent import Agent, LinearSchedule
import sys
sys.path.append('..')
from common import make_env, print_dict
import argparse
import pretty_errors


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="DQN Breakouts")
    parser.add_argument('--train', action='store_true', help='whether train DQN')
    parser.add_argument('--test', action='store_true', help='whether test DQN')
    args = parse(parser)

    env = make_env(args.env_name, 1000, args.num_procs)
    test_env = make_env(args.env_name, 1000, 1, clip_reward=False)
    args.n_obs = env.observation_space.shape[-1]
    args.n_action = env.action_space.n

    first_obs = env.reset()
    agent = Agent(args, first_obs)
    if args.train:
        agent.collect_experiences(env, args.random_epoches, 1.0) # collect some samples via random action
        epsilon_generator = LinearSchedule(args.train_epoches, args.final_epsilon)
        for i in range(args.train_epoches):
            epsilon = epsilon_generator(i)
            log = agent.collect_experiences(env, args.update_freqence, epsilon)
            info = agent.improve_policy(args.update_times)
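
Example #15 leaves LinearSchedule opaque: it is constructed with a step count and a final epsilon and then called with the current epoch index. A common shape for such a schedule, sketched under the assumption that it interpolates linearly from 1.0 down to final_epsilon:

class LinearSchedule:
    """Anneal a value linearly from start_value to final_value over total_steps calls."""

    def __init__(self, total_steps, final_value, start_value=1.0):
        self.total_steps = total_steps
        self.final_value = final_value
        self.start_value = start_value

    def __call__(self, step):
        frac = min(step / self.total_steps, 1.0)
        return self.start_value + frac * (self.final_value - self.start_value)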
Example #16
def stream(args, d):
    stream = sd.InputStream(
        device=args.device[d],
        channels=args.channels,
        samplerate=args.samplerate,
        callback=audio_callback,
    )
    outport = mido.open_output(bus_names[args.bus[d]])
    with stream:
        compute_pitch(args, outport)


if __name__ == "__main__":
    global args
    args, parser = parse(sys.argv[1:])
    global bus_names
    bus_names = mido.get_output_names()
    try:
        if args.list_devices:
            print("Sound devices:")
            print(sd.query_devices())
            print("Virtual MIDI Buses:")
            for i, v in enumerate(bus_names):
                print(f"   {i} {v}")
            parser.exit(0)

        # low level stream
        """
        callback (callable, optional) – User-supplied function to consume,
        process or generate audio data in response to requests from an active
Example #17
def main():
    """The main function of the IRBeast program."""
    # pylint: disable=global-statement
    global LOGGED_IN
    LOGGED_IN = False
    checklist_file = ""
    args = arguments.parse(sys.argv[1:])
    # checks to see if there are any command line arguments passed
    # if so skip repl, otherwise launch repl
    if all(
            # pylint: disable=bad-continuation
            getattr(args, arg) is None or getattr(args, arg) is False
            # pylint: disable=bad-continuation
            for arg in vars(args)):
        print("Welcome to IRBeast")
        # Get login details before entering repl
        while not LOGGED_IN:
            username = str(input("Username: "******"Password: "******"quit" and LOGGED_IN:
            if args[0] == "file":
                checklist_file = file()
            elif args[0] == "checklist":
                checklist(checklist_file)
            elif args[0] == "logout":
                print("Logging user out")
                LOGGED_IN = False
                while not LOGGED_IN:
                    username = str(input("Username: "******"Password: "******"submit":
                submit()
            elif args[0] == "help":
                help(args)
            elif args[0] == "quit":
                print("Thank You")
                sys.exit()
            args = repl_command()
    else:
        if arguments.verify(args):
            if args.login is not False and login_user(
                    # pylint: disable=bad-continuation
                    args.username,
                    args.password,
            ):
                print("Logged In")
                if args.checklist is not False:
                    if args.file is not None:
                        checklist(args.file)
                    else:
                        print("Please Supply a Checklist File")
                if args.submit is not None:
                    submit()
            else:
                print("Invalid Login Info")
                sys.exit()
        else:
            print("Missing Command Line Arguments")
            sys.exit()
Example #18
File: iris.py  Project: Gliptal/simil-aloha
###########
# IMPORTS #
###########

import numpy.random as distributions

import arguments
import settings
from scheduler import Scheduler
from settings import NODES
from node import Node

########
# MAIN #
########

arguments.save(arguments.parse())

# init
#

distributions.seed(settings.SEED)

for point in settings.POINTS:
    NODES.append(Node(point[0], point[1]))

for node in NODES:
    node.find_neigbhours(NODES)
    node.generate_next_packet()

# simulation
#
Example #19
#!/usr/bin/env python3

import arguments
import devices
import simctl
import react_native

if __name__ == '__main__':
    args = arguments.parse()
    device = devices.find_by_spec(devices.DeviceSpec(args.simulator, os='iOS'))

    if device:
        simctl.shutdown_all()
        simctl.boot(udid=device.udid)
        react_native.run_ios()
    else:
        print('Device %s not found' % args.simulator)

Example #20
def main():
    args = arguments.parse()

    feat_extractor = FeatureExtractor().cuda()
    svms = [SVM() for _ in range(args.n_attrs)]

    # Dataloader code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        # transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    val_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])

    logger.info("Setting up training data")
    train_loader = data.DataLoader(COCOAttributes(
        args.attributes,
        args.train_ann,
        train=True,
        split='train2014',
        transforms=train_transforms,
        dataset_root=args.dataset_root),
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.workers,
                                   pin_memory=True)

    logger.info("Setting up validation data")
    val_loader = data.DataLoader(COCOAttributes(
        args.attributes,
        args.val_ann,
        train=False,
        split='val2014',
        transforms=val_transforms,
        dataset_root=args.dataset_root),
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 pin_memory=True)

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    logger.info("Beginning training...")

    feats, targets = get_features(feat_extractor,
                                  train_loader,
                                  n_attrs=args.n_attrs,
                                  split='train')

    if args.checkpoint:
        [svms, _] = joblib.load(args.checkpoint)

    else:
        for i in range(args.n_attrs):
            print("Training for attribute", i)
            # using [0, 1] or [-1, 1] doesn't really make a difference
            svms[i].train(feats, targets[:, i])
            print()

    logger.info('Finished Training')

    logger.info("Running evaluation")

    feats, targets = get_features(feat_extractor,
                                  val_loader,
                                  n_attrs=args.n_attrs,
                                  split='val')

    ap_scores = []

    for i in range(args.n_attrs):
        est = svms[i].test(feats)
        ap_score = average_precision(2 * targets[:, i] - 1, est)
        print("AP score for {0}".format(i), ap_score)
        ap_scores.append(ap_score)

    print("mean AP", sum(ap_scores) / args.n_attrs)

    if not args.checkpoint:
        logger.info("Saving models and AP scores")
        joblib.dump([svms, ap_scores], "svm_baseline.jbl")
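
The evaluation loop converts targets from {0, 1} to {-1, +1} before calling average_precision, so that helper evidently expects signed labels. If it is a thin wrapper over scikit-learn, it might reduce to the following assumed sketch:

from sklearn.metrics import average_precision_score

def average_precision(labels, scores):
    # labels arrive in {-1, +1}; sklearn expects binary {0, 1}
    return average_precision_score(labels > 0, scores)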