Example #1
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file = 'log_train_seglink_%d_%d.log'%image_shape, log_path = FLAGS.train_dir, stdout = False, mode = 'a')
    
    
    config.init_config(image_shape, 
                       batch_size = FLAGS.batch_size, 
                       weight_decay = FLAGS.weight_decay, 
                       num_gpus = FLAGS.num_gpus, 
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight, 
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight, 
                       )

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
        
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)
    
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
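
For reference, a minimal init_logger compatible with the keyword style used above (log_file, log_path, stdout, mode) might look like the sketch below. This is an illustrative stand-in, not the actual implementation of the util module used in these examples; the level parameter is an added assumption.

import logging
import os

def init_logger(log_file=None, log_path=None, stdout=True, mode='a',
                level=logging.DEBUG):
    # Sketch only: attach a file handler (and optionally a console handler)
    # to the root logger. 'level' is an assumed extra parameter.
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    root = logging.getLogger()
    root.setLevel(level)
    if log_file is not None:
        fh = logging.FileHandler(os.path.join(log_path or '.', log_file), mode=mode)
        fh.setFormatter(fmt)
        root.addHandler(fh)
    if stdout:
        sh = logging.StreamHandler()
        sh.setFormatter(fmt)
        root.addHandler(sh)
    return root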
Example #2
File: cfg.py Project: AmesianX/mcsema
def get_cfg(args, fixed_args):
  # Parse any additional args
  parser = argparse.ArgumentParser()

  parser.add_argument(
      '--recover-stack-vars',
      help='Flag to enable stack variable recovery',
      default=False,
      action='store_true')

  parser.add_argument(
      "--std-defs",
      action='append',
      type=str,
      default=[],
      help="std_defs file: definitions and calling conventions of imported functions and data")

  extra_args = parser.parse_args(fixed_args)

  if extra_args.recover_stack_vars:
    RECOVER_OPTS['stack_vars'] = True

  # Setup logger
  util.init_logger(args.log_file)

  # Load the binary in binja
  bv = util.load_binary(args.binary)

  # Once for good measure.
  bv.add_analysis_option("linearsweep")
  bv.update_analysis_and_wait()

  # Twice for good luck!
  bv.add_analysis_option("linearsweep")
  bv.update_analysis_and_wait()

  # Collect all paths to defs files
  log.debug('Parsing definitions files')
  def_paths = set(map(os.path.abspath, extra_args.std_defs))
  def_paths.add(os.path.join(DISASS_DIR, 'defs', '{}.txt'.format(args.os)))  # default defs file

  # Parse all of the defs files
  for fpath in def_paths:
    if os.path.isfile(fpath):
      parse_defs_file(bv, fpath)
    else:
      log.warn('%s is not a file', fpath)

  # Recover module
  log.debug('Starting analysis')
  pb_mod = recover_cfg(bv, args)

  # Save cfg
  log.debug('Saving to file: %s', args.output)
  with open(args.output, 'wb') as f:
    f.write(pb_mod.SerializeToString())

  return 0
Example #3
def user_menu(session, directory):
    """ Show the menu in the first tmux window.
    Retrieve the information about the devices to use for the analysis
    and the folder containing the apps to analyze."""
    devices = {}
    uManagers = []

    logger = util.init_logger(config.configFile, 'Manager')
    logger.info('BareDroid init')
    devices = initDevice(logger)

    windows = divide_et_impera(devices, session)

    #watch log and errors
    logFolder = os.path.join(os.getcwd(),\
                    util.read_config(config.configFile, 'Log', 'folder'))
    windows[1].list_panes()[0].send_keys('tail -F %smanager.log'%logFolder)
    windows[1].list_panes()[1].send_keys('tail -F %smanager_error.log'%logFolder)

    ans=True
    while ans:
        print '-----------------'
        print ("""
    1) List devices
    2) Start Analysis
    3) Stop Analysis
    4) Select Devices
    q) Quit
    m) Menu
        """)
        print ''
        ans=raw_input(">> ")
        if ans=="1":
            listDevices(config.deviceInfoFile, logger)
        elif ans=="2":
            folder = createFolder(logger)
            #initialize
            uManagers = initAnalysis(directory,\
                int(util.read_config(config.configFile,'Analysis', 'apps')),\
                logger, devices)
            #start
            startAnalysis(uManagers, folder, windows, logger)
        elif ans=="3":
            stopAnalysis(uManagers)
        elif ans=="4":
            #TODO select devices
            print 'TODO'
        elif ans=='m' or ans == 'M':
            print ''
        elif ans=='q' or ans == 'Q':
            check(directory, logger)
            logger.info('exit')
            #print print_on_window(windows[0], '\nExit')
            quit()
        elif ans !="":
            print("\n Not valid, try again...")
            ans = True
        else:
            print("\n Not valid, try again...")
            ans = True
Example #4
def convert():
    timestr = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    log_path = os.path.join(config.RAW_RESOURCE_PATH, "log", "%s.log" % timestr)
    util.init_logger(log_path, "debug")
    startTime = time.time()
    logging.info(
        "start convert, rawResoucePath=%s, binResoucePath=%s, target=%s",
        config.RAW_RESOURCE_PATH,
        config.BIN_RESOURCE_PATH,
        config.TARGET,
    )

    if config.CONVERT_FONT:
        convert_font()
        # gen.generate_resource("font")

    if config.CONVERT_MUSIC:
        convert_music()
        # gen.generate_resource("music")

    if config.CONVERT_STRINGS:
        convert_strings()
        gen.generate_resource("string")

    if config.CONVERT_COLORS:
        convert_colors()
        gen.generate_resource("color")

    if config.CONVERT_SPRITE:
        convert_sprite()
        convert_patch_sprite()
        gen.generate_resource("sprite")
        gen.generate_resource("patch_sprite")

    if config.CONVERT_UI:
        convert_ui()
        gen.generate_ui()
        gen.generate_ui_command()

    if config.CONVERT_SCENE:
        convert_scene()
        gen.generate_resource("scene")

    endTime = time.time()
    logging.info("convert end, used:%f seconds", endTime - startTime)
Example #5
def main():
    "Entry point for the CLI DBus interface."
    args = docopt(__doc__, version="0.2")
    util.init_logger(args["--log"], args["-v"], args["--quiet"])
    dbus = BlockifyDBus()

    if args["toggle"]:
        dbus.playpause()
    elif args["next"]:
        dbus.next()
    elif args["prev"]:
        dbus.prev()
    elif args["play"]:
        dbus.play()
    elif args["stop"]:
        dbus.stop()

    if args["openuri"]:
        dbus.open_uri(args["<uri>"])
    elif args["seek"]:
        dbus.seek(args["<secs>"])
    elif args["setpos"]:
        dbus.set_pos(args["<pos>"])

    if args["title"]:
        print dbus.get_song_title()
    elif args["artist"]:
        print dbus.get_song_artist()
    elif args["status"]:
        print dbus.get_song_status()
    elif args["all"]:
        dbus.print_info()
    elif args["get"]:
        length = dbus.get_song_length()
        m, s = divmod(length, 60)
        if args["length"]:
            print "{}m{}s ({})".format(m, s, length)
        else:
            rating = dbus.get_property("Metadata")["xesam:autoRating"]
            artist = dbus.get_song_artist()
            title = dbus.get_song_title()
            album = dbus.get_song_album()
            state = dbus.get_song_status()
            print "{} - {} ({}), {}m{}s, {} ({})".format(artist, title, album,
                                                         m, s, rating, state)
Example #6
def main():
    "Entry point for the CLI-version of Blockify."
    # Log level to fall back to if we get no user input
    level = 2
    
    try:
        args = docopt(__doc__, version=VERSION)
        # 
        if args["-v"] == 0:
            args["-v"] = level
        util.init_logger(args["--log"], args["-v"], args["--quiet"])
    except NameError:
        util.init_logger(logpath=None, loglevel=level, quiet=False)
        log.error("Please install docopt to use the CLI.")
 
    blocklist = Blocklist(util.get_configdir())
    blockify = Blockify(blocklist)
    blockify.start()
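
Examples #5 through #7 pass a log path, an integer verbosity, and a quiet flag. A minimal init_logger with that calling convention could be sketched as follows; this is an assumption-laden illustration, not blockify's actual util.init_logger.

import logging

def init_logger(logpath=None, loglevel=2, quiet=False):
    # Sketch only: map an integer verbosity (0 = least verbose) to a logging
    # level and attach console/file handlers to the root logger.
    levels = [logging.WARNING, logging.INFO, logging.DEBUG]
    level = levels[min(int(loglevel), len(levels) - 1)]
    logger = logging.getLogger()
    logger.setLevel(level)
    fmt = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    if not quiet:
        console = logging.StreamHandler()
        console.setFormatter(fmt)
        logger.addHandler(console)
    if logpath:
        logfile = logging.FileHandler(logpath)
        logfile.setFormatter(fmt)
        logger.addHandler(logfile)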
Example #7
    def __init__(self, device):
        self._device = device
        self._shell = adb.AndroidDebugBridge(self.getDeviceId())
        self._dict = {} #each device has its own list of apps to test (reason: different device)
        self._process = None
        self._pid = 999999
        label = 'Manager.Update - ' + self.getDeviceId()
        self._logger = util.init_logger(config.configFile, label)

        self._queue = Queue() # used to share message (for now only exit)
Example #8
def main(_):
    util.init_logger()
    dump_path = util.io.get_absolute_path('~/temp/no-use/seglink/')
    
    dataset = config_initialization()
    batch_queue = create_dataset_batch_queue(dataset)
    batch_size = config.batch_size
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        tf.train.start_queue_runners(sess)
        b_image, b_seg_label, b_seg_offsets, b_link_label = batch_queue.dequeue()
        batch_idx = 0;
        while True: #batch_idx < 50:
            image_data_batch, seg_label_data_batch, seg_offsets_data_batch, link_label_data_batch = \
                            sess.run([b_image, b_seg_label, b_seg_offsets, b_link_label])
            for image_idx in xrange(batch_size):
                image_data = image_data_batch[image_idx, ...]
                seg_label_data = seg_label_data_batch[image_idx, ...]
                seg_offsets_data = seg_offsets_data_batch[image_idx, ...]
                link_label_data = link_label_data_batch[image_idx, ...]
                
                image_data = image_data + [123, 117, 104]
                image_data = np.asarray(image_data, dtype = np.uint8)
                
                # decode the encoded ground truth back to bboxes
                bboxes = seglink.seglink_to_bbox(seg_scores = seg_label_data, 
                                                 link_scores = link_label_data, 
                                                 seg_offsets_pred = seg_offsets_data)
                
                # draw bboxes on the image
                for bbox_idx in xrange(len(bboxes)):
                    bbox = bboxes[bbox_idx, :] 
                    draw_bbox(image_data, bbox)
                
                image_path = util.io.join_path(dump_path, '%d_%d.jpg'%(batch_idx, image_idx))
                util.plt.imwrite(image_path, image_data)
                print 'Make sure that the text on the image are correctly bounded\
                                                         with oriented boxes:', image_path 
            batch_idx += 1
Example #9
File: tnsd.py Project: bufferx/tns
def main():
    ''' main function
    '''
    init_options()
    init_logger()

    # Ignore the broken pipe signal
    signal.signal(signal.SIGPIPE, signal.SIG_IGN);
                        
    # Handle kill signals
    signal.signal(signal.SIGINT, handle_signal_kill)
    signal.signal(signal.SIGQUIT, handle_signal_kill)
    signal.signal(signal.SIGTERM, handle_signal_kill)
    signal.signal(signal.SIGHUP, handle_signal_kill)

    g_logger.info('START TORNADO SERVER ...')

    for key, option in options.iteritems():
        g_logger.info('Options: (%s, %s)', key, option.value())

    try:
        if sys.version_info[:3] >= (2, 5, 2):
            #pycurl minimum supported version is 7.18.2
            AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
            pass

        HTTPServer.instance().start()
        tornado.ioloop.IOLoop.instance().start()

        HTTPServer.instance().stop()
        tornado.ioloop.IOLoop.instance().close(all_fds=True)

        g_logger.info('STOP TORNADO SERVER ...')
    except socket.error as e:
        g_logger.warning('Socket Error: %s', e, exc_info=True)
    except KeyboardInterrupt as e:
        g_logger.warning('Gently Quit')
    except Exception as e:
        g_logger.error('UnCaught Exception: %s', e, exc_info=True)
Example #10
        self._exception_caught.connect(show_exception_box)

    def exception_hook(self, exc_type, exc_value, exc_traceback):
        if issubclass(exc_type, KeyboardInterrupt):
            # ignore keyboard interrupt to support console applications
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
        else:
            log_msg = '\n'.join([
                ''.join(traceback.format_tb(exc_traceback)),
                '{0}: {1}'.format(exc_type.__name__, exc_value)
            ])

            # trigger message box show
            self._exception_caught.emit(log_msg)
        sys._excepthook(exc_type, exc_value, exc_traceback)


if __name__ == '__main__':
    if len(sys.argv) == 2 and sys.argv[1] == "--linux-recorder":
        from linux_keystroke_recorder import linux_keystroke_recorder

        linux_keystroke_recorder()
    else:
        appctxt = ApplicationContext()  # 1. Instantiate ApplicationContext
        init_logger()
        qt_exception_hook = UncaughtHook()
        window = MainWindow(appctxt)
        window.show()
        exit_code = appctxt.app.exec_()  # 2. Invoke appctxt.app.exec_()
        sys.exit(exit_code)
Example #11
def main():
    p4a = os.environ.get('PYTHON_SERVICE_ARGUMENT', '')
    global _LOGGER

    if len(p4a):
        args = json.loads(p4a)
        # hostlisten
        # portlisten
        # hostconnect
        # portconnect
        # connect_retry
        # connect_secs
        # db_fname
    else:
        parser = argparse.ArgumentParser(prog=__prog__)
        parser.add_argument('--portlisten',
                            type=int,
                            help='port number',
                            required=False,
                            default=11001)
        parser.add_argument('--hostlisten', required=False, default="0.0.0.0")
        parser.add_argument('--ab_portconnect',
                            type=int,
                            help='port number',
                            required=False,
                            default=9004)
        parser.add_argument('--ab_hostconnect',
                            required=False,
                            default="127.0.0.1")
        parser.add_argument('--ab_portlisten',
                            type=int,
                            help='port number',
                            required=False,
                            default=9003)
        parser.add_argument('--ab_hostlisten',
                            required=False,
                            default="0.0.0.0")
        parser.add_argument('--connect_retry',
                            type=int,
                            help='connect retry',
                            required=False,
                            default=10)
        parser.add_argument('--connect_secs',
                            type=int,
                            help='connect secs',
                            required=False,
                            default=5)
        parser.add_argument('--db_fname',
                            required=False,
                            help='DB file path',
                            default=join(dirname(__file__), '..', 'maindb.db'))
        parser.add_argument('--verbose', required=False, default="INFO")
        argall = parser.parse_known_args()
        args = dict(vars(argall[0]))
        args['undo_info'] = dict()
        args['notify_screen_on'] = -1
        args['notify_every_ms'] = -1
        import sys
        sys.argv[1:] = argall[1]
    args['android'] = len(p4a)
    _LOGGER = init_logger(__name__, get_verbosity(args['verbose']))
    _LOGGER.info(f"Server: p4a = {p4a}")
    _LOGGER.debug(f"Server: test debug {args}")
    loop = asyncio.get_event_loop()
    dms = DeviceManagerService(loop=loop, **args)
    try:
        loop.run_until_complete(dms.start())
        loop.run_forever()
    except Exception:
        _LOGGER.error(f'DMS error {traceback.format_exc()}')
    finally:
        try:
            loop.run_until_complete(dms.stop())
            _LOGGER.info("Server: Closing loop")
            loop.close()
        except Exception:
            _LOGGER.error("Server: " + traceback.format_exc())
Example #12
# @notice we don't use the HOME env. variable ( os.path.expanduser("~") or os.getenv('HOME') ) since, when this script gets called with 'sudo',
# it may, depending on the system security policy, give us the home directory of the original caller, which is in most cases not what we want.
# For example, the HOME env. variable (~) is set to the target user's home in Debian 8
# ('sudo' acts as 'sudo -H') but is preserved in Ubuntu 16 ('sudo' acts as
# 'sudo -E')
HOME_DIR = pwd.getpwuid(os.getuid()).pw_dir
TMP_DIR = os.path.join(HOME_DIR, 'tmp')
KEYSTORE_DIR = os.path.join(HOME_DIR, '.keytalk/keystore')
CONFIG_FILE_PATH = '/etc/keytalk/apache.ini'
KTCLIENT_APP_PATH = '/usr/local/bin/keytalk/ktclient'
LOG_FILE_PATH = os.path.join(TMP_DIR, 'ktapachecertrenewal.log')
CRON_LOG_FILE_PATH = os.path.join(TMP_DIR, 'cron.ktapachecertrenewal.log')
KTCLIENT_LOG_PATH = os.path.join(HOME_DIR, '.keytalk/ktclient.log')
Logger = util.init_logger('keytalk', LOG_FILE_PATH,
                          "KeyTalk Apache certificate renewal", "DEBUG",
                          "INFO")

# Globals
error_messages = []
warning_messages = []
all_error_messages = []
all_warning_messages = []
force_arg = '--force'


def shellquoted_site(site):
    newdict = {}
    for key, val in site.iteritems():
        if isinstance(val, basestring):
            newdict[key] = pipes.quote(val)
Example #13
    for attack_class, attack_params in attack_param_grid.items():
        logger.debug("Starting attack images with {}".format(
            attack_class.__name__))

        attack = attack_class(model, sess)

        if not os.path.exists(attack_params['adv_dir']):
            os.mkdir(attack_params['adv_dir'])

        attack.generate(x, **attack_params)


if __name__ == '__main__':
    tf.set_random_seed(2019)

    logger = init_logger("adversarials-2dadelta")

    parser = argparse.ArgumentParser()

    # Common parameters
    parser.add_argument('--train-dir', default=DEFAULT_TRAIN_DIR)
    parser.add_argument('--val-dir', default=DEFAULT_VAL_DIR)
    parser.add_argument('--test-dir', default=DEFAULT_TEST_DIR)
    parser.add_argument('--checkpoint-path', default=DEFAULT_CHECKPOINT_PATH)

    # Train parameters
    parser.add_argument('--num-epochs', default=DEFAULT_NUM_EPOCHS)
    parser.add_argument('--train-batch-size', default=DEFAULT_TRAIN_BATCH_SIZE)
    parser.add_argument('--initial-learning-rate',
                        default=DEFAULT_INITIAL_LEARNING_RATE)
    parser.add_argument('--learning-rage-decay',
Example #14
def init():
    """ Program entry, a simple command line interface"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--target',
                        required=True,
                        type=str,
                        choices=['ais', 'cgi'],
                        help='targeting system')
    parser.add_argument('--proxy', type=int, help='local proxy port')
    parser.add_argument('--crawler',
                        type=str,
                        default='crawler.txt',
                        help='crawler api list')
    parser.add_argument('--ais',
                        type=str,
                        default='ais.json',
                        help='ais account in json format')
    parser.add_argument('--log_dir',
                        type=str,
                        default=os.path.join(os.curdir, 'logs'),
                        help='directory to save logs')
    parser.add_argument('--log_name',
                        type=str,
                        default='visa_fetcher',
                        help='name of log file')
    parser.add_argument('--debug',
                        action='store_true',
                        default=False,
                        help='log debug information')
    parser.add_argument('--noinit_lw',
                        action='store_true',
                        default=False,
                        help='whether not to initiate the latest_written')
    args = parser.parse_args()

    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)

    G.assign('target_system', args.target)
    G.assign('session_file', f'{args.target}-session.json')
    G.assign('crawler_path', args.crawler)
    G.assign(
        'proxies', {
            'http': f'socks5h://127.0.0.1:{args.proxy}',
            'https': f'socks5h://127.0.0.1:{args.proxy}',
        } if args.proxy is not None else None)
    G.assign('log_dir', args.log_dir)
    G.assign('log_name', f'{args.target}_{args.log_name}')

    if args.target.lower() == 'ais':
        with open(args.ais) as f:
            ais_accounts = json.load(f)
            for k, v in ais_accounts.items():
                G.assign(k, v)

    if not args.noinit_lw:
        DB.VisaStatus.initiate_latest_written_sequential(args.target)

    global LOGGER
    global SESSION_CACHE
    LOGGER = util.init_logger(f'{args.target}_{args.log_name}', args.log_dir,
                              args.debug)
    SESSION_CACHE = SessionCache()

    LOGGER.info('FETCHING TARGET: %s', args.target.upper())
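
Example #14 expects init_logger(name, log_dir, debug) to return a logger object. A compatible sketch is shown below; it is illustrative only, and the file naming and formatting are assumptions rather than the project's actual code.

import logging
import os

def init_logger(name, log_dir, debug=False):
    # Sketch only: return a named logger writing to <log_dir>/<name>.log,
    # at DEBUG level when debug is True and INFO otherwise.
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG if debug else logging.INFO)
    handler = logging.FileHandler(os.path.join(log_dir, '{}.log'.format(name)))
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    return logger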
Example #15
                        if search_exception(i.body):
                            logger.info("selected issues:" + i.title + ":" +
                                        i.html_url)
                            # !!! important
                            fp.write(repo.full_name + "," + i.html_url + "," +
                                     i.title + "\n")
                            fp.flush()
                            selected_no = selected_no + 1
                    # ilistdict[i.html_url] = repo.full_name
                else:
                    logger.info("nonselected issues:" + i.title + ":" +
                                i.html_url)


if __name__ == "__main__":
    init_logger(__file__)

    from persontoken import MY_TOKENs

    tklen = len(MY_TOKENs)
    tk_i = 0
    ss = SS(port=7890)
    android = False
    id = 12
    while True:
        g = Github(MY_TOKENs[tk_i % tklen])
        try:
            # get_top_java_repo(g, 6, only_android=False)

            if android:
                urls = util.read_csv('f-droid/f-droid-github-filter.csv')
Example #16
File: trainer.py Project: kohilin/ealm
def train(opt):
    SAVE_DIR = opt.save
    LATEST_MODEL = os.path.join(SAVE_DIR, "latest-model.pt")
    BEST_REWARD_MODEL = os.path.join(SAVE_DIR, "best-reward-model.pt")
    CONFIG_FILE = os.path.join(SAVE_DIR, "config.json")
    LOG_FILE = os.path.join(SAVE_DIR, "log.txt")

    global logger
    logger = util.init_logger(LOG_FILE)

    with open(CONFIG_FILE, "w") as f:
        json.dump(vars(opt), f)

    bertnlp.init()

    agent = dqn.EditorialAgent(layer_num=opt.nlayer, hidden_dim=opt.hdim)

    train_sents = load_file(opt.t)

    memory = dqn.Memory(buffer_size=int(opt.memory))
    optimizer = torch.optim.Adam(agent.parameters(), lr=opt.lr)
    sentence_batcher = make_batcher(train_sents, 1)
    n_sentence_iter = 0
    model_update_interval = 1000
    max_avg_reward = -1
    try:
        for epoch in range(int(opt.e)):
            try:
                cur_sentences = next(sentence_batcher)
            except StopIteration:
                n_sentence_iter += 1
                logger.info("Corpus Iteration {}".format(n_sentence_iter))
                sentence_batcher = make_batcher(train_sents, 1)
                cur_sentences = next(sentence_batcher)

            items, reports = generate_episode_items(bertnlp,
                                                    agent,
                                                    cur_sentences,
                                                    min_cr=opt.min_cr,
                                                    min_rr=opt.min_rr)
            [
                memory.append(i) for i in chain.from_iterable(items)
                if not i.is_terminal
            ]

            if memory.size() < opt.batch:
                continue

            loss, reward = agent.replay(memory.sample(opt.batch))
            loss_, reward_ = step(loss, reward, optimizer, agent)

            msg = "Report : Epoch={} Reward={:.3f} Loss={:.3f} Eps1={:.3f} Eps2={:.3f}\n".format(
                epoch, reward_, loss_, agent.epsilon, agent.selection_epsilon)
            msg += "=" * 70 + "\n\t" + "\n\t".join(
                [i.report() for i in reports[0]]) + "\n" + "=" * 70
            logger.info(msg)

            if epoch != 0 and epoch % model_update_interval == 0:
                logger.info(
                    "Update latest model@Iteration {}".format(n_sentence_iter))
                save(agent, LATEST_MODEL)
                averaged_reward = memory.averaged_reward()
                if averaged_reward > max_avg_reward:
                    max_avg_reward = averaged_reward
                    save(agent, BEST_REWARD_MODEL)
                    logger.info(
                        "Update best reward model@Iteration{}(Averaged Reward={:.5f})"
                        .format(n_sentence_iter, max_avg_reward))

            if epoch != 0 and epoch % opt.decay_interval == 0:
                agent.apply_epsilon_decay()

    except KeyboardInterrupt:
        logger.info("Terminating process ... ")
        logger.info("done!")
Example #17
def main():
    "Entry point for the GUI-version of Blockify."
    # Edit this for less or more logging. Loglevel 0 is least verbose.
    util.init_logger(logpath=None, loglevel=2, quiet=False)
    BlockifyUI()
    gtk.main()
Example #18
def main():
    "Entry point for the GUI-version of Blockify."
    # Edit this for less or more logging. Loglevel 0 is least verbose.
    util.init_logger(logpath=None, loglevel=2, quiet=False)
    BlockifyUI()
    gtk.main()
Example #19
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-v",
                        "--verbosity",
                        action="count",
                        help="increase output verbosity")
    parser.add_argument("-c",
                        "--config",
                        help="path to the yaml configuration file")
    parser.add_argument(
        "-f",
        "--file",
        nargs='?',
        const='-',
        help=
        "check a protected file, retrieve a license; don't give the path to an LCP protected epub file if -e is used."
    )
    parser.add_argument(
        "-l",
        "--lcpl",
        nargs='?',
        const='-',
        help=
        "check an LCP license; don't give the path to an LCP license if -p  is used"
    )
    parser.add_argument(
        "-s",
        "--lsd",
        nargs='?',
        const='-',
        help=
        "launch lsd tests; don't give the path to an LCP license if -p or -l is used"
    )
    args = parser.parse_args()

    # Initialize logger
    util.init_logger(args.verbosity)

    # Load the configuration file
    try:
        config = TestConfig(args.config)
    except FileNotFoundError as err:
        LOGGER.error(err)
        return 1

    license_path = ""

    # Check a protected file, retrieve a license
    if args.file:
        # use the file argument value
        file_path = args.file
        lcpf_test_suite = LCPFTestSuite(config, file_path)
        if not lcpf_test_suite.run():
            return 2
        license_path = lcpf_test_suite.license_path

    # Check an LCP license
    if args.lcpl:
        # the lcpl argument value takes precedence over the preceding license_path value
        license_path = args.lcpl if args.lcpl != "-" else license_path
        lcpl_test_suite = LCPLTestSuite(config, license_path)
        if not lcpl_test_suite.run():
            return 3

    # Check a License Status Document
    # the lsd argument value takes precedence over the lcpl test return
    if args.lsd:
        license_path = args.lsd if args.lsd != "-" else license_path
        lsd_test_suite = LSDTestSuite(config, license_path)
        if not lsd_test_suite.run():
            return 4

    return 0
Example #20
"""
Tuning Regularization Hyperparameters for RNN-based Model
"""
import logging
import logging.config
import pandas as pd

from constants import HParamKey, DATA_PATH, EncoderType
# from constants import DefaultConfig as config
from best_config import OptimalRNNConfig as config
from supervisor import Supervisor
import util

# config logger
logger = logging.getLogger('__main__')
util.init_logger()

# tuning sets
dropout_sets = [0, 0.1, 0.2, 0.3]
wdecay_sets = [0, 1e-7, 1e-5, 1e-3, 1e-1]
# 20 pairs

spv = Supervisor(config)
spv.load_trained_emb()
spv.load_data()
spv.get_dataloader()

record_save_path = util.get_results_folder(
    base_path='/scratch/xl2053/nlp/hw2_data/results_rnn/'
)  # or base_path=DATA_PATH
logger.info(
Example #21
from commands.moderator import Moderator
from commands.objects.state import ClientState
from commands.objects.guildconfig import RoleConfig
from util import Util, CustomHelp, init_logger

from config.loader import cfg

path = os.path.dirname(os.path.abspath(__file__))

# Bot definition
client = Bot(command_prefix=cfg['prefix'],
             help_command=CustomHelp(),
             case_insensitive=True)

# Get logger
logger = init_logger(path)

# Init slotlist buffer
state = ClientState()

# Get guild configs
guildConfig = RoleConfig(os.path.join(path, 'config', 'guildConfig.yml'))
guildConfig.load()

# Add commands
client.add_cog(User(client=client, state=state, guild_config=guildConfig))
client.add_cog(Moderator(client=client, state=state, guild_config=guildConfig))
client.add_cog(Admin(client=client, state=state, guild_config=guildConfig))

client.add_cog(Util(client=client, cfg=cfg, logger=logger))
Example #22
# -*- coding: utf-8 -*-
#########################################################################
# File Name: __init__.py
# Author: xuyingquan
# mail: yingquan.xu@shatacloud
# Created Time: Sat May  7 10:33:08 CST 2016
#########################################################################

import util
import pika
import json
import traceback
import threading
import time

LOG = util.init_logger('messageclient', '/var/log/messageclient.log')

from messageclient.rabbitmq_driver.rabbit_engine import PikaEngine, Target, Transport
from messageclient.rabbitmq_driver.rabbit_engine import get_transport
from messageclient.rabbitmq_driver.rabbit_message import Message
from messageclient.rabbitmq_driver import Consumer, Publisher, RpcPublisher
from messageclient.rabbitmq_driver import on_message_v1


message_handler = dict()                        # dict of user-defined message handler functions
receive_response_flag = False                   # flag: has the message response result been handled
response_result_lock = threading.Lock()         # lock protecting the message response result

__all__ = [
    "Target",
    "Transport",
Example #23
    ft3d_dataset = ft3d_filenames(args.dataset_path)

    tf.logging.set_verbosity(tf.logging.ERROR)
    dispnet = DispNet(mode="traintest", ckpt_path=args.checkpoint_path, dataset=ft3d_dataset,
                      batch_size=args.batch_size, is_corr=args.use_corr)

    ckpt = tf.train.latest_checkpoint(args.checkpoint_path)
    if not ckpt:
        if not os.path.exists(args.checkpoint_path):
            os.makedirs(args.checkpoint_path)
    model_name = "DispNet"
    if args.use_corr:
        model_name += "Corr1D"

    init_logger(args.checkpoint_path, name=model_name)
    writer = tf.summary.FileWriter(args.checkpoint_path)

    schedule_step = 50000
    if args.weight_schedule is True:
      weights_schedule = [[0., 0., 0., 0., .2, 1.],
                          [0., 0., 0., .2, 1., .5],
                          [0., 0., .2, 1., .5, 0.],
                          [0., .2, 1., .5, 0., 0.],
                          [.2, 1., .5, 0., 0., 0.],
                          [1., .5, 0., 0., 0., 0.],
                          [1., 0., 0., 0., 0., 0.]]
    else:
      weights_schedule = [[1., 1., 1., 1., 1., 1.]]

    lr_schedule = [1e-4] * 5
Example #24
def main(args):
    script_dir = Path.cwd()
    # args = util.get_config(default_file=script_dir / 'config.yaml')
    output_dir = script_dir / args.output_dir
    output_dir.mkdir(exist_ok=True)

    log_dir = util.init_logger(args.name, output_dir, 'logging.conf')
    logger = logging.getLogger()

    with open(log_dir / "args.yaml", "w") as yaml_file:  # dump experiment config
        yaml.safe_dump(args, yaml_file)

    pymonitor = util.ProgressMonitor(logger)
    tbmonitor = util.TensorBoardMonitor(logger, log_dir)
    monitors = [pymonitor , tbmonitor]

    if args.device.type == 'cpu' or not t.cuda.is_available() or args.device.gpu == []:
        args.device.gpu = []
    else:
        available_gpu = t.cuda.device_count()
        for dev_id in args.device.gpu:
            if dev_id >= available_gpu:
                logger.error('GPU device ID {0} requested, but only {1} devices available'
                             .format(dev_id, available_gpu))
                exit(1)
        # Set the default device to the first one on the list
        t.cuda.set_device(args.device.gpu[0])
        # Enable the cudnn built-in auto-tuner to accelerate training, but it
        # will introduce some fluctuations in a narrow range.
        t.backends.cudnn.benchmark = True
        t.backends.cudnn.deterministic = False

    # Initialize data loader
    train_loader, val_loader, test_loader = util.load_data(args.dataloader)
    logger.info('Dataset `%s` size:' % args.dataloader.dataset +
                '\n          Training Set = %d (%d)' % (len(train_loader.sampler), len(train_loader)) +
                '\n        Validation Set = %d (%d)' % (len(val_loader.sampler), len(val_loader)) +
                '\n              Test Set = %d (%d)' % (len(test_loader.sampler), len(test_loader)))
    
    # Define loss function (criterion) and optimizer
    criterion = t.nn.CrossEntropyLoss().to(args.device.type)

    # Create the model ( download )
    model = create_model(args)
    
    # load activation each layer to initialize activation quantize scale
    act_init = process.get_initialization(test_loader, model, criterion, -2, monitors, args)
    # replace model
    modules_to_replace = quan.find_modules_to_quantize(model, [args.quan, act_init])

    for name, module in model.named_modules():
        print(name)

    model = quan.replace_module_by_names(model, modules_to_replace)
    logger.info('Inserted quantizers into the original model')
    
    # print new model
    # for name, module in model.named_modules():
    #     print(name, '\t', type(module))

    # progressive quantization mode
    if args.quan.progress.mode is True:
        model, _, _= util.load_checkpoint(model, args.quan.progress.path, args.device.type, lean=args.resume.lean)        
        progress_act_init = process.get_initialization(test_loader, model, criterion, -2, monitors, args) if args.quan.progress.init_restart else None
        model = quan.change_bit_width(model, [args.quan.progress, progress_act_init])
        quan.check_changed_bit(model)



    # resume 
    start_epoch = 0
    if args.resume.path:
        model, start_epoch, _ = util.load_checkpoint(
            model, args.resume.path, args.device.type, lean=args.resume.lean)
        
    # data Parallel by new model
    if args.device.gpu and not args.dataloader.serialized:
        model = t.nn.DataParallel(model, device_ids=args.device.gpu)

    tbmonitor.writer.add_graph(model, input_to_model=train_loader.dataset[0][0].unsqueeze(0))
    model.to(args.device.type)
    
    # optimizer = t.optim.Adam(model.parameters(), lr=args.optimizer.learning_rate)
    optimizer = t.optim.SGD(model.parameters(),
                            lr=args.optimizer.learning_rate,
                            momentum=args.optimizer.momentum,
                            weight_decay=args.optimizer.weight_decay)
    lr_scheduler = util.lr_scheduler(optimizer,
                                     batch_size=train_loader.batch_size,
                                     num_samples=len(train_loader.sampler),
                                     **args.lr_scheduler)
    logger.info(('Optimizer: %s' % optimizer).replace('\n', '\n' + ' ' * 11))
    logger.info('LR scheduler: %s\n' % lr_scheduler)

    print(f'**********train_loader_sampler : {len(train_loader.sampler)}')

    perf_scoreboard = process.PerformanceScoreboard(args.log.num_best_scores)

    if args.eval:
        process.validate(test_loader, model, criterion, -1, monitors, args)
    else:  # training
        if args.resume.path or args.pre_trained:
            logger.info('>>>>>>>> Epoch -1 (pre-trained model evaluation)')
            top1, top5, _ = process.validate(val_loader, model, criterion,
                                             start_epoch - 1, monitors, args)
            perf_scoreboard.update(top1, top5, start_epoch - 1)
        for epoch in range(start_epoch, args.epochs):
            logger.info('>>>>>>>> Epoch %3d' % epoch)
            t_top1, t_top5, t_loss = process.train(train_loader, model, criterion, optimizer,
                                                   lr_scheduler, epoch, monitors, args)
            v_top1, v_top5, v_loss = process.validate(val_loader, model, criterion, epoch, monitors, args)

            tbmonitor.writer.add_scalars('Train_vs_Validation/Loss', {'train': t_loss, 'val': v_loss}, epoch)
            tbmonitor.writer.add_scalars('Train_vs_Validation/Top1', {'train': t_top1, 'val': v_top1}, epoch)
            tbmonitor.writer.add_scalars('Train_vs_Validation/Top5', {'train': t_top5, 'val': v_top5}, epoch)

            perf_scoreboard.update(v_top1, v_top5, epoch)
            is_best = perf_scoreboard.is_best(epoch)
            util.save_checkpoint(epoch, args.arch, model, {'top1': v_top1, 'top5': v_top5}, is_best, args.name, log_dir, args.dataloader.serialized)

        logger.info('>>>>>>>> Epoch -1 (final model evaluation)')
        process.validate(test_loader, model, criterion, -1, monitors, args)
    
    
    if args.save_all is True:
        filename = 'raw_model.pth'
        filepath = os.path.join(output_dir, filename)

    tbmonitor.writer.close()  # close the TensorBoard
    logger.info('Program completed successfully ... exiting ...')
    logger.info('If you have any questions or suggestions, please visit: github.com/zhutmost/lsq-net')
Example #25
        proc.kill()
        outs, errs = proc.communicate()

    outs = outs.decode()
    if outs != "" and not outs.isspace():
        logger.debug(f"pom_convert-Out: {outs}")

    errs = errs.decode()
    if errs != "" and not errs.isspace():
        logger.error(f"pom_convert-Err: {errs}")

    try:
        with open(json_path, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
    except Exception:
        logger.error(f"pom_convert output is broken.")
        return dict()


if __name__ == '__main__':
    util.init_logger(__file__)
    result = sim('testfiles/', 'testfiles/')
    result = sim(
        'tmp/Jasig_CalendarPortlet_218edf8f6e55f41e1d6e54e9391affa390f83724',
        'tmp/AAPS-Omnipod_AndroidAPS')
    print(result)

    # result = pom2json('pom_convert/pom.xml', 'pom_json.json')
    # print(result)
Example #26
 def __init__(self, logger_name='tank', replay_size=int(1e4), summary=True):
     self.logger = init_logger(logger_name)
     self.epsilon = 0.1
     self.replay = Replay(replay_size)
     self.nn = NN(self.replay.sample_fn, summary)
Example #27
# -*- coding: utf-8 -*-
#########################################################################
# File Name: __init__.py
# Author: xuyingquan
# mail: yingquan.xu@shatacloud
# Created Time: Sat May  7 10:33:08 CST 2016
#########################################################################

import util
import pika
import json
import traceback
import threading
import time

LOG = util.init_logger('messageclient', '/var/log/messageclient.log')

from messageclient.rabbitmq_driver.rabbit_engine import PikaEngine, Target, Transport
from messageclient.rabbitmq_driver.rabbit_engine import get_transport
from messageclient.rabbitmq_driver.rabbit_message import Message
from messageclient.rabbitmq_driver import Consumer, Publisher, RpcPublisher
from messageclient.rabbitmq_driver import on_message_v1

message_handler = dict()  # dict of user-defined message handler functions
receive_response_flag = False  # flag: has the message response result been handled
response_result_lock = threading.Lock()  # lock protecting the message response result

__all__ = [
    "Target",
    "Transport",
    "Message",
Example #28
        config.CATEGORY_URL_FILE_PATH + f
        for f in sorted(listdir(config.CATEGORY_URL_FILE_PATH))
    ]
    url_list = []
    # print(file_list)

    for file in file_list:
        with open(file, "r") as f:
            url_list = url_list + f.readlines()
            # print(len(url_list))
    return file_list, url_list


if __name__ == "__main__":

    parser = _build_parser()
    FLAGS = parser.parse_args()
    init_logger(logging._nameToLevel[FLAGS.log_level])
    log_args(FLAGS)

    # Crawl the Indeed category URLs
    if FLAGS.data == config.CRAWLING_OBJECT_LIST[0]:
        CL.write_search_url_list(FLAGS.flush_num, FLAGS.max_sleep)

    # Crawl the data
    elif FLAGS.data == config.CRAWLING_OBJECT_LIST[1]:
        file_list, url_list = read_url_list()
        EL.write_emp_detail(url_list, FLAGS.max_sleep)
        print(file_list)
        print("has been written")
Example #29
def main():
    script_dir = Path.cwd()
    args = util.get_config(default_file=script_dir / 'config.yaml')

    output_dir = script_dir / args.output_dir
    output_dir.mkdir(exist_ok=True)

    log_dir = util.init_logger(args.name, output_dir, 'logging.conf')
    logger = logging.getLogger()

    with open(log_dir / "args.yaml",
              "w") as yaml_file:  # dump experiment config
        yaml.safe_dump(args, yaml_file)

    pymonitor = util.ProgressMonitor(logger)
    tbmonitor = util.TensorBoardMonitor(logger, log_dir)
    monitors = [pymonitor, tbmonitor]

    if args.device.type == 'cpu' or not t.cuda.is_available(
    ) or args.device.gpu == []:
        args.device.gpu = []
    else:
        available_gpu = t.cuda.device_count()
        for dev_id in args.device.gpu:
            if dev_id >= available_gpu:
                logger.error(
                    'GPU device ID {0} requested, but only {1} devices available'
                    .format(dev_id, available_gpu))
                exit(1)
        # Set the default device to the first one on the list
        t.cuda.set_device(args.device.gpu[0])
        # Enable the cudnn built-in auto-tuner to accelerate training, but it
        # will introduce some fluctuations in a narrow range.
        t.backends.cudnn.benchmark = True
        t.backends.cudnn.deterministic = False

    # Create the model
    model = create_model(args)

    start_epoch = 0
    perf_scoreboard = process.PerformanceScoreboard(args.log.num_best_scores)

    if args.resume.path:
        model, start_epoch, _ = util.load_checkpoint(model,
                                                     args.resume.path,
                                                     args.device.type,
                                                     lean=args.resume.lean)

    # Initialize data loader
    train_loader, val_loader, test_loader = util.load_data(
        args.dataloader.dataset, args.dataloader.path, args.batch_size,
        args.dataloader.workers, args.dataloader.val_split)
    logger.info('Dataset `%s` size:' % args.dataloader.dataset +
                '\n          training = %d (%d)' %
                (len(train_loader.sampler), len(train_loader)) +
                '\n        validation = %d (%d)' %
                (len(val_loader.sampler), len(val_loader)) +
                '\n              test = %d (%d)' %
                (len(test_loader.sampler), len(test_loader)))

    # Define loss function (criterion) and optimizer
    criterion = t.nn.CrossEntropyLoss().to(args.device.type)

    # optimizer = t.optim.Adam(model.parameters(), lr=args.optimizer.learning_rate)
    optimizer = t.optim.SGD(model.parameters(),
                            lr=args.optimizer.learning_rate,
                            momentum=args.optimizer.momentum,
                            weight_decay=args.optimizer.weight_decay)
    lr_scheduler = util.lr_scheduler(optimizer,
                                     batch_size=train_loader.batch_size,
                                     num_samples=len(train_loader.sampler),
                                     **args.lr_scheduler)
    logger.info(('Optimizer: %s' % optimizer).replace('\n', '\n' + ' ' * 11))
    logger.info('LR scheduler: %s\n' % lr_scheduler)

    if args.eval:
        process.validate(test_loader, model, criterion, -1, monitors, args)
    else:  # training
        if args.resume.path or args.pre_trained:
            logger.info('>>>>>>>> Epoch -1 (pre-trained model evaluation)')
            top1, top5, _ = process.validate(val_loader, model, criterion,
                                             start_epoch - 1, monitors, args)
            perf_scoreboard.update(top1, top5, start_epoch - 1)
        for epoch in range(start_epoch, args.epochs):
            logger.info('>>>>>>>> Epoch %3d' % epoch)
            t_top1, t_top5, t_loss = process.train(train_loader, model,
                                                   criterion, optimizer,
                                                   lr_scheduler, epoch,
                                                   monitors, args)
            v_top1, v_top5, v_loss = process.validate(val_loader, model,
                                                      criterion, epoch,
                                                      monitors, args)

            tbmonitor.writer.add_scalars('Train_vs_Validation/Loss', {
                'train': t_loss,
                'val': v_loss
            }, epoch)
            tbmonitor.writer.add_scalars('Train_vs_Validation/Top1', {
                'train': t_top1,
                'val': v_top1
            }, epoch)
            tbmonitor.writer.add_scalars('Train_vs_Validation/Top5', {
                'train': t_top5,
                'val': v_top5
            }, epoch)

            perf_scoreboard.update(v_top1, v_top5, epoch)
            is_best = perf_scoreboard.is_best(epoch)
            util.save_checkpoint(epoch, args.arch, model, {
                'top1': v_top1,
                'top5': v_top5
            }, is_best, args.name, log_dir)

        logger.info('>>>>>>>> Epoch -1 (final model evaluation)')
        process.validate(test_loader, model, criterion, -1, monitors, args)

    logger.info('Program completed successfully ... exiting ...')
    logger.info(
        'If you have any questions or suggestions, please visit: github.com/zhutmost/lsq-net'
    )
Example #30
import logging
import sys
import traceback
from datetime import datetime
from os.path import basename, dirname, isfile, join, splitext
from xml.dom import minidom
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement

from device.mantimermanager import ManTimerManager
from dictionary import dictionary_parse, dictionary_write
from util import b2s, bfromhex, init_logger, s2b, tohexs

DEVICE_SAVE_FLAG_TABLE = 1
DEVICE_SAVE_FLAG_MAIN = 2

_LOGGER = init_logger(__name__, level=logging.DEBUG)

if sys.version_info >= (3, 0):
    long = int


class Device(object):
    epoch = datetime.utcfromtimestamp(0)
    GET_STATE_ACTION = '-999'

    def process_asynch_state_change(self, state):
        pass

    def connect_devices(self, device_map):
        pass
Example #31
                     "started".format(event))

    def connection_check_out_failed(self, event):
        self.logger.info("[pool {0.address}] connection check out "
                     "failed, reason: {0.reason}".format(event))

    def connection_checked_out(self, event):
        self.logger.info("[pool {0.address}][conn #{0.connection_id}] "
                     "connection checked out of pool".format(event))

    def connection_checked_in(self, event):
        self.logger.info("[pool {0.address}][conn #{0.connection_id}] "
                     "connection checked into pool".format(event))


monitoring.register(ConnectionPoolListener(init_logger('tuixue_mongo_conn', './logs', True)))


class VisaStatus:
    """ MongoDB operations for storing:
        1. All fetched visa status by (visa_type, embassy_code), *only successful fetching*
        2. Overview of available appointment date of a given write date, *only successful fetching*
        3. Latest written time and data, *including failed one*

        The successfully fetched visa status will be stored in Mongo collection `'visa_status'`
        and the latest written time will be stored in Mongo collection `'latest_written'`.

        The schema of documents for `'visa_status'` is as follow:

        ```python
        {
Example #32
import os

from aiogram import Bot, Dispatcher
from aiogram.types import Message, CallbackQuery, InlineKeyboardMarkup, InlineKeyboardButton
from aiogram.utils import executor
from aiogram.utils.exceptions import MessageToDeleteNotFound

import util

logger = util.init_logger(__name__)

API_TOKEN = os.environ.get('API_TOKEN')
WEBHOOK_HOST = os.environ.get('WEBHOOK_HOST', '')
WEBHOOK_PATH = '/' + API_TOKEN
WEBHOOK_URL = WEBHOOK_HOST + API_TOKEN
LOCAL_MODE = bool(int(os.environ.get('LOCAL_MODE', '0')))
CONNECTION_TYPE = os.environ.get('CONNECTION_TYPE', None)

if not CONNECTION_TYPE:
    CONNECTION_TYPE = 'polling' if LOCAL_MODE else 'webhook'
PROXY = os.environ.get('PROXY', 'socks5://127.0.0.1:9150')

bot = Bot(API_TOKEN, proxy=PROXY) if LOCAL_MODE else Bot(API_TOKEN)
dispatcher = Dispatcher(bot)


async def send_start_message(user_id):
    await bot.send_message(user_id,
                           util.get_time_welcome(),
                           reply_markup=InlineKeyboardMarkup().add(
                               InlineKeyboardButton(
Example #33
        '--do_continue',
        action='store_true',
        help='Dictates whether to load optim dict, scheduler dict, epoch_i')
    args = parser.parse_args()
    assert args.save_path is not None

    # Make dirs
    if args.load_path is not None:
        os.makedirs(os.path.dirname(args.load_path), exist_ok=True)
    os.makedirs(os.path.dirname(args.save_path), exist_ok=True)
    if args.log_path is not None:
        os.makedirs(os.path.dirname(args.log_path), exist_ok=True)
    DO_LOAD = args.load_path is not None and os.path.exists(args.load_path)

    # Start logging
    util.init_logger(args.log_path)
    info(args)

    # Make dataloaders
    trainset, testset = get_datasets(args.train, args.test)
    trainloader = DataLoader(trainset, batch_size=args.train_bs, shuffle=True)
    testloader = DataLoader(testset, batch_size=args.test_bs)

    # Build model
    master_net = network.Net(arch=[int(x) for x in args.arch.split()])
    if DO_LOAD:
        dump = torch.load(args.load_path)
        epoch_i = dump['epoch_i'] if args.do_continue else 0
        master_net.load_state_dict(dump['state_dict'])
        info('Loaded from %s' % args.load_path)
    else:
Example #34
    ft3d_dataset = ft3d_filenames(args.dataset_path)

    tf.logging.set_verbosity(tf.logging.ERROR)
    dispnet = DispNet(mode="test",
                      ckpt_path=args.checkpoint_path,
                      dataset=ft3d_dataset,
                      input_size=INPUT_SIZE,
                      batch_size=args.batch_size,
                      is_corr=args.use_corr)

    ckpt = tf.train.latest_checkpoint(args.checkpoint_path)
    if not ckpt:
        logging.error("no checkpoint in provided path found!")
        sys.exit()
    init_logger(args.checkpoint_path)
    log_step = args.log_step
    if args.n_steps is None:
        N_test = len(ft3d_dataset["TEST"])
    else:
        N_test = args.n_steps

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(graph=dispnet.graph,
                    config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(dispnet.init)
        logging.debug("initialized")
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        logging.debug("queue runners started")
        try:
Example #35
def main(args):
  logger = init_logger(args.run_name)

  # Datasets
  img_height, img_width, _ = InceptionV3.SHAPE

  def prep_func(f, x, y):
    x = read_image(x)
    x = decode_png(x)
    x = resize(x, img_height, img_width)
    return f, x, y

  trn_ds = make_dataset(args.train_dir, args.batch_size, prep_func,
                        shuffle=True, repeat=True, add_filenames=True)
  val_ds = make_dataset(args.train_dir, args.batch_size, prep_func,
                        shuffle=False, repeat=False, add_filenames=True)
  tst_ds = make_dataset(args.train_dir, args.batch_size, prep_func,
                        shuffle=False, repeat=False, add_filenames=True)

  num_classes = len(trn_ds.labels_map)

  it = tf.data.Iterator.from_structure(
    trn_ds.dataset.output_types, trn_ds.dataset.output_shapes)

  num_trn_batches = int(math.ceil(float(trn_ds.size) / args.batch_size))
  num_val_batches = int(math.ceil(float(val_ds.size) / args.batch_size))
  num_tst_batches = int(math.ceil(float(tst_ds.size) / args.batch_size))

  trn_init_op = it.make_initializer(trn_ds.dataset)
  val_init_op = it.make_initializer(val_ds.dataset)
  tst_init_op = it.make_initializer(tst_ds.dataset)

  # Filename, input image and corresponding one-hot encoded label
  f, x, y = it.get_next()

  sess = tf.Session()

  # Model and logits
  is_training = tf.placeholder(dtype=tf.bool)
  model = InceptionV3(nb_classes=num_classes, is_training=is_training)
  logits = model.get_logits(x)

  attacks_ord = {
    'inf': np.inf,
    '1': 1,
    '2': 2
  }

  # FGM attack
  attack_params = {
    'eps': args.eps,
    'clip_min': 0.0,
    'clip_max': 1.0,
    'ord': attacks_ord[args.ord],
  }
  attack = FastGradientMethod(model, sess)

  # Learning rate with exponential decay
  global_step = tf.Variable(0, trainable=False)
  global_step_update_op = tf.assign(global_step, tf.add(global_step, 1))
  lr = tf.train.exponential_decay(
    args.initial_lr, global_step, args.lr_decay_steps,
    args.lr_decay_factor, staircase=True)

  cross_entropy = CrossEntropy(model, attack=attack,
                               smoothing=args.label_smth,
                               attack_params=attack_params,
                               adv_coeff=args.adv_coeff)
  loss = cross_entropy.fprop(x, y)

  # Gradients clipping
  opt = tf.train.RMSPropOptimizer(learning_rate=lr, decay=args.opt_decay,
                                  epsilon=1.0)
  gvs = opt.compute_gradients(loss)
  clip_min, clip_max = -args.grad_clip, args.grad_clip

  capped_gvs = []
  for g, v in gvs:
    capped_g = tf.clip_by_value(g, clip_min, clip_max) \
      if g is not None else tf.zeros_like(v)
    capped_gvs.append((capped_g, v))

  train_op = opt.apply_gradients(capped_gvs)

  saver = tf.train.Saver()
  global_init_op = tf.global_variables_initializer()

  if args.load_model and args.restore_path:
    saver.restore(sess, args.restore_path)
    logger.info("Model restored from: ".format(args.restore_path))


  with sess.as_default():
    sess.run(global_init_op)

    best_val_acc = -1
    for epoch in range(args.num_epochs):
      logger.info("Epoch: {:04d}/{:04d}".format(epoch + 1, args.num_epochs))
      sess.run(trn_init_op)

      for batch in range(num_trn_batches):
        loss_np, lr_np, _ = sess.run([loss, lr, train_op],
                                     feed_dict={is_training: True})
        logger.info("Batch: {:04d}/{:04d}, loss: {:.05f}, lr: {:.05f}"
          .format(batch + 1, num_trn_batches, loss_np, lr_np))

      logger.info("Epoch completed...")

      sess.run(global_step_update_op)
      val_acc = eval_acc(sess, logits, y, num_val_batches,
                         is_training, val_init_op)
      logger.info("Validation set accuracy: {:.05f}".format(val_acc))

      if best_val_acc < val_acc:
        output_path = saver.save(sess, args.model_path)
        logger.info("Model was successfully saved: {}".format(output_path))
        best_val_acc = val_acc
        pass

    tst_acc = eval_acc(sess, logits, y, num_tst_batches,
                       is_training, tst_init_op)
    logger.info("Test set accuracy: {:.05f}".format(tst_acc))
Example #36
       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""

from util import init_logger, conf, MQTTClient, handle_sigterm, delay_start
from smart_meter import Discovery
import signal

if __name__ == '__main__':
    signal.signal(signal.SIGTERM, handle_sigterm)
    signal.signal(signal.SIGINT, handle_sigterm)
    if conf.StartDelay.enabled:
        delay_start(conf.StartDelay.min, conf.StartDelay.max)
    init_logger(conf.Logger.level)
    try:
        device_pool = dict()
        mqtt_client = MQTTClient()
        discovery = Discovery(mqtt_client=mqtt_client)
        mqtt_client.on_connect = discovery.schedule_refresh
        mqtt_client.on_message = discovery.schedule_refresh
        discovery.start()
        mqtt_client.start()
    finally:
        pass
Example #37
def init():
    """ Entrance of CLI"""
    parser = argparse.ArgumentParser(description='Asynchronous Topcoder Data Collector command line tool.')
    parser.add_argument(
        '--with-registrant',
        action='store_true',
        dest='with_registrant',
        default=False,  # Temporary setting
        help='Whether fetch registrant details or not.'
    )
    parser.add_argument(
        '--status',
        dest='status',
        default=Status.ALL,
        type=Status,
        help='The status of challenges for fetching.'
    )
    parser.add_argument(
        '-s', '--since',
        dest='since',
        default=(datetime.now(timezone.utc) - timedelta(days=1)).strftime('%Y-%m-%d'),
        type=lambda since: replace_datetime_tail(datetime.fromisoformat(since), 'min'),
        help='Specify the earliest end date in UTC of a challenge.',
    )
    parser.add_argument(
        '-t', '--to',
        dest='to',
        default=datetime.now(timezone.utc).strftime('%Y-%m-%d'),
        type=lambda to: replace_datetime_tail(datetime.fromisoformat(to), 'max'),
        help='Specify the latest start date in UTC of a challenge.',
    )
    parser.add_argument(
        '--output-dir',
        dest='output_dir',
        default=Path(os.path.join(os.curdir, 'data')),
        type=Path,
        help='Directory for storage of the fetched data. Created if it does not exist.',
    )
    parser.add_argument(
        '--log-dir',
        dest='log_dir',
        default=Path(os.path.join(os.curdir, 'logs')),
        type=Path,
        help='Directory for storage of logs. Created if it does not exist.',
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help='Whether to log debug level message.'
    )

    args = parser.parse_args()

    if args.since > args.to:
        print('since value should not be greater than to value.')
        exit(1)

    if not args.output_dir.is_dir():
        os.mkdir(args.output_dir)

    if not args.log_dir.is_dir():
        os.mkdir(args.log_dir)

    logger = init_logger(args.log_dir, 'fetch', args.debug)

    asyncio.run(Fetcher(args.status, args.since, args.to, args.with_registrant, args.output_dir, logger).fetch())
Example #38
from kivy.metrics import dp
from kivy.properties import BooleanProperty, ListProperty, ObjectProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.colorpicker import ColorPicker
from kivy.uix.screenmanager import Screen
from kivy.utils import get_color_from_hex
from kivymd.uix.button import MDFlatButton, MDRaisedButton
from kivymd.uix.dialog import MDDialog
from kivymd.uix.list import TwoLineListItem
from kivymd.uix.tab import MDTabsBase
from util import init_logger
from util.timer import Timer

from .mdcardpost import ICON_TRASH, SwipeToDeleteItem

_LOGGER = init_logger(__name__)

Builder.load_string('''
<FormatterItem>:
    height: dp(56)
    font_style: "H6"
    secondary_font_style: "H5"

<ViewWidget>:
    name: 'view_edit'
    BoxLayout:
        orientation: 'vertical'
        height: self.minimum_height
        MDToolbar:
            id: id_toolbar
            pos_hint: {'top': 1}
Example #39
        if f'{visa_type}-{location}' not in G.CD_LIST:
            return
        self.logger.warning(
            f"mark {visa_type} {location} unavailable for {cd.seconds}s")
        with G.LOCK:
            self.session_avail[visa_type][location] = datetime.now() + cd


if __name__ == "__main__":
    # Manual testing

    from pprint import pprint

    test_log = 'test_session_log'
    G.assign('log_name', test_log)
    util.init_logger(test_log, './logs', debug=True)
    for sys in ('cgi', 'ais'):
        G.assign('target_system', sys)
        G.assign('session_file', f'test_{sys}_session.json')
        sc = SessionCache()

        if sys == 'cgi':
            sess = sc.get_session('F', '金边')
            print(sess)
            new_sess = Session(session='new_sess_{}'.format(''.join(
                random.choices(string.ascii_lowercase, k=16))),
                               sys='cgi')
            sc.replace_session('F', '金边', sess, new_sess)
            pprint(sc.session['F']['金边'])
        elif sys == 'ais':
            sess = sc.get_session('F', 'en-gb')
Example #40
if args.modelnet_subset:
    assert args.data.endswith('.lmdb')
if args.combine_train_val:
    assert args.data.endswith('.lmdb')
if args.rgb:
    assert args.data.endswith('.lmdb')
if args.force_res:
    assert args.data.endswith('.lmdb')

logname = 'logging.log'
if args.skip_train:
    logname = 'logging_eval.log'
if args.nolog:
    logname = ''
logger = util.init_logger(args.logdir, fname=logname)


def load_model(classes):
    model = models.resnet_mvgcnn(
        args.depth,
        pretrained=args.pretrained,
        num_classes=len(classes),
        gconv_channels=args.gconv_channels,
        view_dropout=args.view_dropout,
        n_group_elements=args.n_group_elements,
        n_homogeneous=args.n_homogeneous,
        bn_after_gconv=args.bn_after_gconv,
        gconv_activation=ACTIVATIONS[args.gconv_activation],
        gconv_support=args.gconv_support,
        viewpool=args.viewpool,
Example #41
Tuning Dimensional Hyperparameters for RNN-based Model
Notice: for tuning training-related hyperparameters only.
Models that depends on different DataLoaders not applicable.
"""
import time
import logging
import logging.config

from constants import HParamKey
from constants import DefaultConfig as config
from supervisor import Supervisor
import util

# config logger
logger = logging.getLogger('__main__')
util.init_logger(logfile='demo-{}.log'.format(int(time.time())))

# init supervisor
spv = Supervisor(config)
# get DataLoader for supervisor
spv.load_data()
spv.get_dataloader()

result_folder = util.get_results_folder()
logger.info(
    "Output directory (loss history, accuracy history, checkpoints): {}".
    format(result_folder))

# ============ RNN demo ===========
# new parameters
conf_update = {