Code example #1
File: Oracle_NN.py Project: arqam-ai/SVR
def main(args):
	
	logger = logging.getLogger()
	file_log_handler = logging.FileHandler('Train.log')
	logger.addHandler(file_log_handler)
	logger.setLevel('INFO')
	formatter = logging.Formatter()
	file_log_handler.setFormatter(formatter)
	stderr_log_handler = logging.StreamHandler(sys.stdout)
	logger.addHandler(stderr_log_handler)
	stderr_log_handler.setFormatter(formatter)

	# class_dic: {'class name': number of instances in the train/test set}
	_, test_class_dic, _, _ = class_counter(args, 'test')
	_, train_class_dic, _, _ = class_counter(args, 'train')
	
	#print(train_class_dic)

	kwargs = {'num_workers':4, 'pin_memory':True}
	# the class order of the loaded ptcloud matches the key order of train/test_class_dic
	ptcloud = np.load(args.ptcloud_path)
	train_set = ptcloud['train']         
	test_set = ptcloud['test']
	assert train_set.shape[0] == sum(train_class_dic.values())
	assert test_set.shape[0] == sum(test_class_dic.values())
	train_set = torch.from_numpy(train_set).to(args.device)
	test_set = torch.from_numpy(test_set).to(args.device)
	model = Oracle_NN(args.device, logger)

	train_idx = 0 # front index of a train class
	test_idx = 0 # front index of a test class
	train_slicer = 0 # back index of a train class
	test_slicer = 0 # back index of a test class
	class_loss = {} 
	for idx, key in enumerate(train_class_dic):
		train_slicer += train_class_dic[key]
		test_slicer += test_class_dic[key]
		logger.info('key:{},train_idx:{},train_slicer:{},num:{}'.format(key, train_idx, train_slicer, train_class_dic[key]))
		logger.info('key:{},test_idx:{},test_slicer:{},num:{}'.format(key, test_idx, test_slicer, test_class_dic[key]))
		# feed one class's train and test subsets to model.forward
		avg_loss, class_prediction = model.forward(train_set[train_idx:train_slicer], test_set[test_idx:test_slicer])
		if idx == 0:
			prediction = class_prediction
		else:
			prediction = torch.cat((prediction, class_prediction),0)
		train_idx += train_class_dic[key]
		test_idx += test_class_dic[key]
		class_loss[key] = avg_loss.item()
		logger.info('key:{}, avg loss:{}'.format(key, class_loss[key]))


	
	final_avg_loss = sum(class_loss.values()) / len(class_loss)  # mean of the per-class average losses
	logger.info('final avg loss: {}'.format(final_avg_loss))
	prediction = prediction.cpu().numpy()
	np.save('prediction.npy', prediction)
	np.save('class_loss.npy', class_loss)
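The loop above relies on the loaded point clouds being stored contiguously per class, in the same key order as train_class_dic / test_class_dic. A minimal, self-contained sketch of that front-index/back-index slicing pattern, using hypothetical toy counts:

import numpy as np

class_counts = {'airplane': 3, 'chair': 2}      # hypothetical per-class instance counts
data = np.arange(sum(class_counts.values()))    # stand-in for the stacked point-cloud array

front = 0   # front index of the current class block
back = 0    # back index of the current class block
for name, count in class_counts.items():
    back += count
    print(name, data[front:back])               # contiguous slice belonging to this class
    front += count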
Code example #2
    def encode_sentences(self, sentences):
        """Encode a list of sentences

        Args:
            sentences: the list of sentences

        Returns:
            an (N, d) numpy matrix of sentence encodings.
        """
        encodings = []
        glog.setLevel("ERROR")
        for i in tqdm(range(0, len(sentences), self._batch_size),
                      "encoding sentence batches"):
            encodings.append(
                self._encoder_client.encode(sentences[i:i + self._batch_size]))
        glog.setLevel("INFO")
        stacked = np.vstack(encodings)
        glog.debug('sentence encoding matrix shape: {}'.format(stacked.shape))
        return l2_normalize(stacked)
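l2_normalize is defined elsewhere in the project; a minimal sketch of what such a helper typically looks like (an assumption, not the project's actual implementation):

import numpy as np

def l2_normalize(matrix, eps=1e-12):
    """Row-wise L2 normalization of an (N, d) matrix."""
    norms = np.linalg.norm(matrix, axis=1, keepdims=True)
    return matrix / np.maximum(norms, eps)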
Code example #3
    def __init__(self, model_dir=None):
        """ Init the parser given the model folder.
        :param model_dir: Path to the folder storing the model, downloaded by
        RerankingParser.fetch_and_load() method.
        :type model_dir: str
        """
        if model_dir is None:
            # Use 'model/GENIA+PubMed' under current folder as default.
            filepath = os.path.realpath(__file__)
            dirpath = os.path.dirname(filepath)
            model_dir = os.path.join(dirpath, 'model/GENIA+PubMed')
        glog.setLevel(glog.INFO)
        glog.info('Loading model from ' + model_dir)

        # The BLLIP parser doesn't support multi-threaded parsing, and we run
        # it as a single thread in a single process (in both nlputils and
        # spark), so this should be fine. It raises a RuntimeError if
        # initialized twice.
        self.reranking_parser = \
            bllipparser.RerankingParser.from_unified_model_dir(model_dir)
Code example #4
def get_logger(logPath='./logs', fileName=None, logLevel="INFO", is_Del=True):
    
    surName = datetime.datetime.now().strftime('%m-%d-%y_%H-%M-%S')
    if fileName is None:
        fileName = surName
    else:
        fileName = fileName + '-' + surName

    logFile = "{0}/{1}.log".format(logPath, fileName)
    
    is_Exist_Path = os.path.exists(logPath)
    is_Exist_File = os.path.exists(logFile)
    if not is_Exist_Path:
        os.makedirs(logPath)
    if is_Del and is_Exist_File:
        os.remove(logFile)
        
    handler = glog.logging.FileHandler(logFile)
    glog.logger.addHandler(handler)
    glog.setLevel(logLevel)

    glog.info("Happy Logging!")
    return glog, logFile
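A hypothetical call site for the helper above (file name and level are illustrative only):

log, log_file = get_logger(logPath='./logs', fileName='train', logLevel='INFO')
log.info('Training started, logging to {}'.format(log_file))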
Code example #5
File: lander.py Project: Babars7/real-lsd
    steps = 0
    total_reward = 0
    while not done:
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        dist, _ = agent.model(state)
        next_state, reward, done, _ = env.step(dist.sample().cpu().numpy()[0])
        state = next_state
        steps += 1
        total_reward += reward

    mean_reward = total_reward / steps
    return [total_reward, mean_reward]

'''---------------------------------------------------------------'''
# Set to INFO for debugging
log.setLevel("WARN")

'''---------------Copy settings file to data folder---------------'''
path     = os.path.dirname(real_lsd.__file__)
abs_path = path + '/envs/settings/landing/cpptest.json'
cp_path  = '/media/scratch1/nasib/data/'
list_files = subprocess.run(["cp", abs_path, cp_path])
log.warn("The exit code was: %d" % list_files.returncode)
'''---------------------------------------------------------------'''

'''---------------Check cuda availability/set device--------------'''
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
'''---------------------------------------------------------------'''

# initialise environment
Code example #6
File: multi_instance.py Project: sidney1994/bionlp17
        # Train binary classifiers.
        self.binary_learner.learn(binary_class_data, binary_class_label, 10)

    def learn(self, epoch=10):
        for i in range(epoch):
            glog.info('Epoch {}'.format(i))
            self.e_step_result = []
            for f in range(self.fold):
                self.e_step(f)
            for f in range(self.fold):
                self.m_step(f)


if __name__ == '__main__':
    glog.setLevel(glog.DEBUG)
    train_file = sys.argv[1]
    test_file = sys.argv[2]
    eval_file = sys.argv[3]
    # Used for training the binary classifier at entity pair level.
    relation_name = sys.argv[4]

    # Iterator for training data.
    iter_train = MultiInstanceIterator('train')
    iter_train.base_iter.set_min_feature_freq(2)

    iter_train.base_iter.set_feature_set('miml')
    iter_train.base_iter.log_setting()

    iter_train.base_iter.load_instance(train_file)
    iter_train.load_as_training_data(shuffle_entity_pair=True)
Code example #7
File: main.py Project: FancyXun/labeltoy
def _main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--version',
                        '-V',
                        action='store_true',
                        help='show version')
    parser.add_argument('--reset-config',
                        action='store_true',
                        help='reset qt config')
    parser.add_argument(
        '--logger-level',
        default='info',
        choices=['debug', 'info', 'warning', 'fatal', 'error'],
        help='logger level',
    )
    parser.add_argument('filename', nargs='?', help='image or label filename')
    parser.add_argument(
        '--output',
        '-O',
        '-o',
        help='output file or directory (if it ends with .json it is '
        'recognized as file, else as directory)')
    default_config_file = os.path.join(os.path.expanduser('~'), '.labelmerc')
    parser.add_argument(
        '--config',
        dest='config_file',
        help='config file (default: %s)' % default_config_file,
        default=default_config_file,
    )
    # config for the gui
    parser.add_argument(
        '--nodata',
        dest='store_data',
        action='store_false',
        help='stop storing image data to JSON file',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--autosave',
        dest='auto_save',
        action='store_true',
        help='auto save',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--nosortlabels',
        dest='sort_labels',
        action='store_false',
        help='stop sorting labels',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--flags',
        help='comma separated list of flags OR file containing flags',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--labels',
        help='comma separated list of labels OR file containing labels',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--validatelabel',
        dest='validate_label',
        choices=['exact', 'instance'],
        help='label validation types',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--keep-prev',
        action='store_true',
        help='keep annotation of previous frame',
        default=argparse.SUPPRESS,
    )
    parser.add_argument(
        '--epsilon',
        type=float,
        help='epsilon to find nearest vertex on canvas',
        default=argparse.SUPPRESS,
    )
    args = parser.parse_args()

    if args.version:
        print('{0} {1}'.format(__appname__, __version__))
        sys.exit(0)

    log.setLevel(getattr(logging, args.logger_level.upper()))

    if hasattr(args, 'flags'):
        if os.path.isfile(args.flags):
            with codecs.open(args.flags, 'r', encoding='utf-8') as f:
                args.flags = [l.strip() for l in f if l.strip()]
        else:
            args.flags = [l for l in args.flags.split(',') if l]

    if hasattr(args, 'labels'):
        if os.path.isfile(args.labels):
            with codecs.open(args.labels, 'r', encoding='utf-8') as f:
                args.labels = [l.strip() for l in f if l.strip()]
        else:
            args.labels = [l for l in args.labels.split(',') if l]

    config_from_args = args.__dict__
    config_from_args.pop('version')
    reset_config = config_from_args.pop('reset_config')
    filename = config_from_args.pop('filename')
    output = config_from_args.pop('output')
    config_file = config_from_args.pop('config_file')
    config = get_config(config_from_args, config_file)

    if not config['labels'] and config['validate_label']:
        log.error('--labels must be specified with --validatelabel or '
                  'validate_label: true in the config file '
                  '(ex. ~/.labelmerc).')
        sys.exit(1)

    output_file = None
    output_dir = None
    if output is not None:
        if output.endswith('.json'):
            output_file = output
        else:
            output_dir = output

    app = QtWidgets.QApplication(sys.argv)
    app.setApplicationName(__appname__)
    app.setWindowIcon(newIcon('icon'))
    win = MainWindow(
        config=config,
        filename=filename,
        output_file=output_file,
        output_dir=output_dir,
    )

    if reset_config:
        log.info('Resetting Qt config: %s' % win.settings.fileName())
        win.settings.clear()
        sys.exit(0)

    win.show()
    win.raise_()
    sys.exit(app.exec_())
Code example #8
def main():
    parser = argparse.ArgumentParser(
        description="Upload MLBF files to Kinto as records"
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--crlite", action="store_true", help="Perform a CRLite update")
    group.add_argument(
        "--intermediates", action="store_true", help="Perform an Intermediate CA update"
    )

    crlite_group = parser.add_argument_group("crlite", "crlite upload arguments")
    crlite_group.add_argument("--noop", action="store_true", help="Don't update Kinto")
    crlite_group.add_argument(
        "--download-path",
        type=Path,
        default=Path(tempfile.TemporaryDirectory().name),
        help="Path to temporarily store CRLite downloaded artifacts",
    )

    int_group = parser.add_argument_group(
        "intermediates", "intermediates upload arguments"
    )
    int_group.add_argument(
        "--debug", action="store_true", help="Enter a debugger during processing"
    )
    int_group.add_argument(
        "--delete", action="store_true", help="Delete entries that are now missing"
    )
    int_group.add_argument(
        "--export", help="Export intermediate set inspection files to this folder"
    )

    parser.add_argument("--filter-bucket", default="crlite_filters")
    parser.add_argument("--verbose", "-v", help="Be more verbose", action="store_true")

    signer_group = parser.add_mutually_exclusive_group()
    signer_group.add_argument(
        "--request-review",
        action="store_true",
        help="Mark the Kinto collection for signature when done",
    )
    signer_group.add_argument(
        "--approve-sign",
        action="store_true",
        help="Approve the Kinto collection for signing",
    )

    args = parser.parse_args()

    if args.verbose:
        log.setLevel("DEBUG")

    if args.noop:
        log.info("The --noop flag is set, will not make changes.")

    auth = {}
    try:
        ensureNonBlank(["KINTO_AUTH_TOKEN"])
        auth = BearerTokenAuth(settings.KINTO_AUTH_TOKEN)
        log.info("Using authentication bearer token")
    except Exception:
        ensureNonBlank(["KINTO_AUTH_USER", "KINTO_AUTH_PASSWORD"])
        auth = HTTPBasicAuth(settings.KINTO_AUTH_USER, settings.KINTO_AUTH_PASSWORD)
        log.info(
            "Using username/password authentication. Username={}".format(
                settings.KINTO_AUTH_USER
            )
        )

    log.info(
        f"Connecting... RO={settings.KINTO_RO_SERVER_URL}, RW={settings.KINTO_RW_SERVER_URL}"
    )

    rw_client = PublisherClient(
        server_url=settings.KINTO_RW_SERVER_URL,
        auth=auth,
        bucket=settings.KINTO_BUCKET,
        retry=5,
    )

    ro_client = PublisherClient(
        server_url=settings.KINTO_RO_SERVER_URL,
        bucket=settings.KINTO_BUCKET,
        retry=5,
    )

    try:
        if args.approve_sign:
            if args.crlite:
                crlite_sign(args=args, rw_client=rw_client)
            elif args.intermediates:
                intermediates_sign(args=args, rw_client=rw_client)
            else:
                parser.print_help()
            return

        if args.crlite:
            publish_crlite(args=args, rw_client=rw_client, ro_client=ro_client)
            if not args.noop and args.request_review:
                log.info("Set for review")
                rw_client.request_review_of_collection(
                    collection=settings.KINTO_CRLITE_COLLECTION,
                )
            return

        if args.intermediates:
            publish_intermediates(args=args, rw_client=rw_client, ro_client=ro_client)
            return

        parser.print_help()

    except KintoException as ke:
        log.error("An exception at Kinto occurred: {}".format(ke))
        raise ke
    except Exception as e:
        log.error("A general exception occurred: {}".format(e))
        raise e
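ensureNonBlank and settings come from the surrounding project; a rough sketch of what such a guard might look like (an assumption, not the project's actual code):

def ensureNonBlank(setting_names):
    """Raise if any of the named settings is missing or empty (sketch)."""
    for name in setting_names:
        if not getattr(settings, name, None):
            raise Exception('{} must not be blank.'.format(name))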
Code example #9
File: train.py Project: arqam-ai/SVR
def main(args):

    # load data
    starter_time = time.time()
    kwargs = {'num_workers': 16, 'pin_memory': True}
    print("loading train data ...")
    train_loader = torch.utils.data.DataLoader(
        what3d_dataset_views(data_basedir=args.data_basedir,
                             ptcloud_path=args.ptcloud_path,
                             img_path=args.img_path,
                             label_path=args.label_path,
                             splits_path=args.splits_path,
                             split_name='train',
                             class_path=args.class_path,
                             sample_ratio=args.sample_ratio,
                             image_height=args.image_size,
                             image_width=args.image_size,
                             views=list(args.views),
                             read_view=args.read_view,
                             points_num=args.pts_num,
                             mode=args.mode),
        batch_size=args.train_batch_size,
        shuffle=True,
        **kwargs)
    print("loading test data ...")
    test_loader = torch.utils.data.DataLoader(what3d_dataset_views(
        data_basedir=args.data_basedir,
        ptcloud_path=args.ptcloud_path,
        img_path=args.img_path,
        label_path=args.label_path,
        splits_path=args.splits_path,
        split_name='test',
        class_path=args.class_path,
        sample_ratio=args.sample_ratio,
        image_height=args.image_size,
        image_width=args.image_size,
        views=list(args.views),
        read_view=args.read_view,
        points_num=args.pts_num,
        mode=args.mode),
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              **kwargs)

    print("loading val data ...")
    val_loader = torch.utils.data.DataLoader(what3d_dataset_views(
        data_basedir=args.data_basedir,
        ptcloud_path=args.ptcloud_path,
        img_path=args.img_path,
        label_path=args.label_path,
        splits_path=args.splits_path,
        split_name='val',
        class_path=args.class_path,
        sample_ratio=args.sample_ratio,
        image_height=args.image_size,
        image_width=args.image_size,
        views=list(args.views),
        read_view=args.read_view,
        points_num=args.pts_num,
        mode=args.mode),
                                             batch_size=args.val_batch_size,
                                             shuffle=False,
                                             **kwargs)
    print("Initialize cache={}".format(time.time() - starter_time))

    netG = GeneratorVanilla(grid_dims=(32, 32, 1),
                            resgen_width=512,
                            resgen_depth=5,
                            resgen_codelength=512,
                            class_num=55,
                            read_view=args.read_view,
                            folding_twice=args.folding_twice)

    print(str(netG))

    #netG.to(args.device)
    netG = torch.nn.DataParallel(netG, device_ids=[0, 1])

    logger = logging.getLogger()
    logger.info('Number of parameters={}'.format(
        count_parameter_num(netG.parameters())))
    check_exist_or_mkdirs(args.log_dir)
    file_log_handler = logging.FileHandler(
        os.path.join(args.log_dir, args.log_filename))
    logger.addHandler(file_log_handler)

    #stderr_log_handler = logging.StreamHandler(sys.stdout)
    #logger.addHandler(stderr_log_handler)

    logger.setLevel('INFO')
    formatter = logging.Formatter()
    file_log_handler.setFormatter(formatter)
    #stderr_log_handler.setFormatter(formatter)
    logger.info(args)

    # set solver and loss function
    criterion_G = ChamferDistance().to(args.device)
    criterion_C = torch.nn.CrossEntropyLoss().to(args.device)
    criterion_M = torch.nn.MSELoss(reduction='mean')

    optmizer_G = torch.optim.Adam(
        netG.parameters(),
        lr=args.lr_G,
        betas=(args.G_adam_beta1, 0.999),
        weight_decay=args.weight_decay,
    )

    lr_scheduler_G = torch.optim.lr_scheduler.StepLR(
        optmizer_G,
        step_size=args.lr_decay_step,
        gamma=args.lr_decay_rate,
    )

    # train and test

    runner = TrainTester(
        netG=netG,
        criterion_G=criterion_G,
        criterion_C=criterion_C,
        criterion_M=criterion_M,
        optmizer_G=optmizer_G,
        lr_scheduler_G=lr_scheduler_G,
        alpha=args.alpha,
        logger=logger,
        args=args,
    )

    if args.train:
        runner.run(train_loader=train_loader,
                   test_loader=test_loader,
                   val_loader=val_loader)
        logger.info('Training Done!')

    if args.test:
        runner.netG.load_state_dict(
            torch.load(os.path.join(args.snapshot_dir,
                                    "model_train_best.pth")))
        runner.optimizer_G.load_state_dict(
            torch.load(os.path.join(args.snapshot_dir,
                                    "solver_train_best.pth")))

        runner.test(epoch=args.total_epochs + 1,
                    loader=test_loader,
                    type='test')
        logger.info('Testing Done!')
Code example #10
def main():
    ray.init(ignore_reinit_error=True)
    log.setLevel("DEBUG")

    test_job_light()
    test_job_m()
Code example #11
args = parser.parse_args()

# Load options from envs
for name in ['MASTER_ADDR', 'MASTER_PORT']:
    setattr(args, name.lower(), os.environ[name])

for name in [
        'RANK', 'WORLD_SIZE', 'LOCAL_RANK', 'WORLD_LOCAL_SIZE',
        'WORLD_NODE_RANK'
]:
    setattr(args, name.lower(), int(os.environ[name]))

args.node_rank = args.world_node_rank

log.setLevel(args.verbosity)
my_log(args)
ss.init()

if not args.cpu:
    torch.cuda.set_device(args.local_rank)
    torch.backends.cudnn.benchmark = True

if args.deterministic:
    torch.manual_seed(args.rank)
    np.random.seed(args.rank)
    torch.backends.cudnn.deterministic = True
    torch.cuda.manual_seed(args.rank)

dist.init_process_group(backend=args.backend)
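The environment variables read above are normally injected by the distributed launcher; for a quick single-process test they could be stubbed like this (assumed values, not launcher defaults):

import os

for name, value in {
        'MASTER_ADDR': '127.0.0.1', 'MASTER_PORT': '29500',
        'RANK': '0', 'WORLD_SIZE': '1', 'LOCAL_RANK': '0',
        'WORLD_LOCAL_SIZE': '1', 'WORLD_NODE_RANK': '0'}.items():
    os.environ.setdefault(name, value)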
Code example #12
__all__ = ['CephReader']

import ceph
import glog

glog.setLevel(glog.logging.ERROR)


class CephReader:
    def __call__(self, path):
        s3client = ceph.S3Client()
        content = s3client.Get(path)
        return content
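A hypothetical usage sketch (the path format depends on the Ceph/S3 deployment):

reader = CephReader()
content = reader('s3://my-bucket/images/0001.jpg')   # hypothetical bucket/key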
Code example #13
def main(args):
    # load data
    starter_time = time.time()
    kwargs = {'num_workers': 4, 'pin_memory': True}

    print("loading train data ...")
    trainset = PointCloudDataset(args.train_json)
    train_loader = torch.utils.data.DataLoader(
        trainset,
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs,
    )
    print("loading test data ...")
    testset = PointCloudDataset(args.test_json)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)
    print("Initialize cache={}".format(time.time() - starter_time))

    im_encoder = SegNet(input_channels=3, output_channels=3)
    pointVAE = PointVAE(args=args)
    #net = GeneratorVAE(
    #	encoder_dim=(3, 3),
    #	grid_dims=(32, 32, 1),
    #	Generate1_dims=259,
    #	Generate2_dims=1091,
    #	Generate3_dims=1219,
    #	args=args,
    #)
    net = GeneratorVAE(
        im_encoder=im_encoder,
        pointVAE=pointVAE,
        encoder_dim=(3, 3),
        grid_dims=(32, 32, 1),
        Generate1_dims=259,
        Generate2_dims=1091,
        Generate3_dims=1219,
        args=args,
    )
    #init_weights(net, init_type="xavier")

    logger = logging.getLogger()
    file_log_handler = logging.FileHandler(os.path.join(args.log_dir, args.log_filename))
    logger.addHandler(file_log_handler)

    stderr_log_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stderr_log_handler)

    logger.setLevel('INFO')
    formatter = logging.Formatter()
    file_log_handler.setFormatter(formatter)
    stderr_log_handler.setFormatter(formatter)
    logger.info(args)

    criterion_I = MaskedL1().to(args.device)
    criterion_PTC = ChamfersDistance().to(args.device)

    optimizer_image = torch.optim.Adam(
        im_encoder.parameters(),
        lr=args.lr_image,
        betas=(args.adam_beta1, 0.999),
        weight_decay=args.weight_decay,
    )
    optimizer_VAE = torch.optim.Adam(
        pointVAE.parameters(),
        lr=args.lr_vae,
        betas=(args.adam_beta1, 0.999),
        weight_decay=args.weight_decay,
    )
    optimizer = torch.optim.Adam(
        net.parameters(),
        lr=args.lr,
        betas=(args.adam_beta1, 0.999),
        weight_decay=args.weight_decay,
    )

    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer,
        step_size=args.lr_decay_step,
        gamma=args.lr_decay_rate,
    )

    # train and test

    runner = TrainTester(
        net=net,
        criterion_I=criterion_I,
        criterion_PTC=criterion_PTC,
        optimizer=optimizer,
        optimizer_image=optimizer_image,
        optimizer_VAE=optimizer_VAE,
        lr_scheduler=lr_scheduler,
        logger=logger,
        args=args,
    )

    if args.train:
        runner.run(
            train_loader=train_loader,
            test_loader=test_loader,
        )
        logger.info('Training Done!')

    if args.test:
        runner.test(
            epoch=args.total_epochs + 1,
            loader=test_loader,
        )
        logger.info('Testing Done!')
Code example #14
        action="store_true",
        help="show plot window",
    )
    output_opts.add_argument("--save_plots",
                             action="store_true",
                             help="Save plots?")
    output_opts.add_argument("--save_boxplots",
                             action="store_true",
                             help="Save boxplots?")
    output_opts.add_argument("--save_results",
                             action="store_true",
                             help="Save results?")

    main_parser = argparse.ArgumentParser(description="{}".format(basic_desc))
    sub_parsers = main_parser.add_subparsers(dest="subcommand")
    sub_parsers.required = True
    return shared_parser


import argcomplete
import sys
if __name__ == '__main__':
    log.setLevel('INFO')
    parser = parser()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if run(args):
        sys.exit(os.EX_OK)
    else:
        raise Exception("Regression tests failed.")
Code example #15
import argparse
import gns_pod_tool as tool
import sys
import glog
import gns_sum_orbdif as sum_dif

parser = argparse.ArgumentParser(description="Demo of argparse")
parser.add_argument('--year', '-yr', '-yyyy', default='2018')
parser.add_argument('--idoy', default='001')
parser.add_argument('--bin', default='001')
parser.add_argument('--xml', default='001')
parser.add_argument('--dir', default='001')

args = parser.parse_args()
print(args)

glog.setLevel("ERROR")

doy_xml = ""
xml_dir = args.xml  # xml file for each day, format is yyyydoy.xml
log_dir = args.dir
bin_dir = args.bin
grt_bin = "/project/jdhuang/GNSS_software/GREAT/great_pco_L3/build/Bin/"
scp_dir = "/workfs/jdhuang/great_projects/e_all/scripts/great"  # scripts dir
mod_xml = "/workfs/jdhuang/great_projects/e_all/scripts/great/gnss_tb_ge_3_freq.xml"  # model xml
python_version = "python3.7"  # python version

year = str(args.year).zfill(4)  # zero-pad to the yyyy format
idoy = str(args.idoy).zfill(3)  # zero-pad to the 3-digit day of year

yeardoy = year + idoy
Code example #16
File: main.py Project: unjambonakap/chdrft
    def __call__(self, force=False):
        f = inspect.currentframe().f_back

        if not force and not f.f_globals['__name__'] == '__main__':
            return

        if 'main' in f.f_globals:
            parser = None
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser.add_argument('--verbosity', type=str, default='ERROR')
            parser.add_argument('--log_file', type=str)
            parser.add_argument('--runid', type=str, default='default')
            want_cache = 'cache' in f.f_globals and not is_python2
            cache = None
            if want_cache:
                cache_argparse(parser)

            if 'args' in f.f_globals:
                args_func = f.f_globals['args']
                args_func(parser)

            parser.add_argument('other_args',
                                nargs=argparse.REMAINDER,
                                default=['--'])

            if not is_python2:
                argcomplete.autocomplete(parser)
            flags = parser.parse_args()
            if flags.other_args and flags.other_args[0] == '--':
                flags.other_args = flags.other_args[1:]
            self.flags = flags
            for k, v in self.override_flags.items():
                setattr(self.flags, k, v)

            glog.setLevel(flags.verbosity)
            if flags.log_file:
                glog.logger.addHandler(logging.FileHandler(flags.log_file))

            if 'flags' in f.f_globals:
                f.f_globals['flags'] = flags

            if want_cache:
                cache = FileCacheDB.load_from_argparse(flags)
                f.f_globals['cache'] = cache

            main_func = f.f_globals['main']
            if is_python2:
                main_func()
            else:
                with ExitStack() as stack:
                    self.stack = stack
                    stack.enter_context(self.global_context)
                    script_name = sys.argv[0]
                    plog_filename = '/tmp/opa_plog_{}_{}.log'.format(
                        os.path.basename(script_name), flags.runid)

                    plog_file = open(plog_filename, 'w')
                    stack.enter_context(plog_file)
                    self.plog_file = plog_file

                    if cache:
                        stack.enter_context(cache)
                    main_func()
Code example #17
File: main.py Project: unjambonakap/ctf
from chdrft.emu.structures import StructBuilder, CodeStructExtractor, Structure, StructBackend, g_data, MemBufAccessor
from chdrft.emu.func_call import AsyncMachineCaller, FuncCallWrapperGen, FunctionCaller, AsyncFunctionCaller, SimpleBufGen
import unicorn as uc
import unicorn.x86_const as ucx86
from chdrft.emu.elf import ElfUtils
from chdrft.emu.trace import TraceEvent, WatchedMem, WatchedRegs, Display
import chdrft.emu.trace as trace
import traceback as tb
import numpy as np
import binascii
import struct
from chdrft.tube.fifo import ManagedBidirectionalFifo
from chdrft.tube.process import Process
import curses.ascii
import time
glog.setLevel(glog.INFO)
import itertools
import os
import ctypes
import ctypes.util
from chdrft.utils.swig import swig_unsafe
from collections import defaultdict
import pefile
import yaml
import chdrft.emu.structures as Structures
import subprocess as sp
from chdrft.dbg.gdbdebugger import GdbDebugger, launch_gdb
from concurrent.futures import wait
import tempfile
import re
import multiprocessing
Code example #18
import argparse
import os
import gns_pod_tool as tool
import gns_sum_orbdif as sum_dif
import sys
import glog

parser = argparse.ArgumentParser(description="Demo of argparse")
parser.add_argument('--year', '-yr', '-yyyy', default='2018')
parser.add_argument('--idoy', default='001')
parser.add_argument('--ilen', default='001')  # only a 1-day solution has been tested

args = parser.parse_args()
print(args)  # print the parsed arguments

glog.setLevel("ERROR")  # 只输出错误信息和提示信息

doy_xml = ""
mod_xml = "/workfs/jdhuang/great_projects/e_all/scripts/great/gnss_pod_e_157_if_ecom2_merge.xml"  # model xml
scp_dir = "/workfs/jdhuang/great_projects/e_all/scripts/great"  # scripts dir
prj_dir = "/workfs/jdhuang/great_projects/e_all/if_157_ecom2_merge"  # project dir
xml_dir = "/workfs/jdhuang/great_projects/e_all/if_157_ecom2_merge/xml"  # xml file for each day, format is yyyydoy.xml
prj_log = "/workfs/jdhuang/great_projects/e_all/if_15_ecom2_new"  # log files for cycle-slip detection
prj_log13 = "/workfs/jdhuang/great_projects/e_all/if_17_ecom2_merge"  # log files for cycle-slip detection
bin_dir = "/project/jdhuang/GNSS_software/GREAT/pod_3_freq_IF/build/Bin"  # bin dir for great or other app
prd_dir = "./result"  # result dir for saving products
log_dir = "./logInfo"  # log dir for app and cmd log file
python_version = "python3.7"  # python version

year = str(args.year).zfill(4)  # format as yyyy
idoy = str(args.idoy).zfill(3)  # format as day of year, right-aligned and zero-padded
Code example #19
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
""")
ap.add_argument("-f",
                "--file",
                required=True,
                help="path to the input xlsx file to generate configs to")
ap.add_argument("-t",
                "--template",
                required=True,
                help="path to the template to use to generate config files")

args = vars(ap.parse_args())
log.setLevel(args['verbosity'])
log.info("Logs initialized successfully.")

general_params.load_params(args)
log.info("General params initialized successfully.")


def main():
    log.info("main")
    from xls import get_config_data
    from config import cfg_writer
    for row in get_config_data():
        cfg_writer.write(row, f"{row['hostname']}.txt")
    log.info('main -> done')

Code example #20
                trainGraphs.extend(graphFolds[i])

        hist = trainThenValid(trainGraphs, validGraphs, '%d-%d' % (runId, f))
        cvMetrics.append(hist)

    avgMetrics = averageMetrics(cvMetrics)
    df = pd.DataFrame.from_dict(avgMetrics)
    histFile = open(
        '%sGpu%sRun%s.csv' % (cmd_args.data, cmd_args.gpu_id, runId), 'w')
    histFile.write("# %s\n" % str(gHP))
    df.to_csv(histFile, index_label='Epoch', float_format='%.6f')
    histFile.close()


if __name__ == '__main__':
    log.setLevel("INFO")
    random.seed(cmd_args.seed)
    np.random.seed(cmd_args.seed)
    torch.manual_seed(cmd_args.seed)

    startTime = time.process_time()
    graphs = loadGraphsMayCache(cmd_args.train_dir)
    normalizeFeatures(graphs, isTestSet=False, operation=cmd_args.norm_op)
    trainGraphs = filterOutNoEdgeGraphs(graphs)  # drop graphs without edges before training
    dataReadyTime = time.process_time() - startTime
    log.info('Dataset ready takes %.2fs' % dataReadyTime)

    for (id, hp) in enumerate(HyperParameterIterator(cmd_args.hp_path)):
        for (key, val) in hp.items():
            gHP[key] = val
Code example #21
File: data_util.py Project: moomou/mlab
                _encode_data((g, mode, sr)) for g in group if g
            ]

        for data, duration in data_duration_tuple:
            if duration < 0.7:
                # skip clips shorter than 700 ms
                continue

            assert not np.isnan(
                data).any(), 'Invalid data generated:: %s' % group

            all_data.append(data)
            total_duration_sec += duration

    return total_duration_sec, all_data


p = mp.Pool(POOL_SIZE)
if __name__ == '__main__':
    import fire

    if os.environ.get('DEBUG'):
        glog.setLevel('DEBUG')

    sample_file = os.path.join(VCTK_ROOT, 'p225', 'p225_001.wav')
    fire.Fire({
        't':
        lambda: process_wav(sample_file, mode=DataMode.mfcc_delta).all() and
        'fin',
    })