Example #1
    def add_params(name: str, *obj: object):
        log = ChangeLog.query.filter_by(name=name).all()
        successful = False
        if log is not None and len(log) > 0:
            return
        else:
            try:
                changelog = ChangeLog()
                for o in obj:
                    try:
                        db.session.add(o)
                        successful = True
                    except Exception as e:
                        Logger.info(e)
                        db.session.rollback()
                        successful = False

                    if successful:
                        changelog.name = name
                        changelog.created = str(int(dt.now().timestamp()))
                        db.session.add(changelog)

                db.session.commit()
            except Exception as e:
                Logger.info(e)
                db.session.rollback()
                raise
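Note on the error handling in this example: with SQLAlchemy, session.add() only stages an object in the session, so database errors usually surface at flush() or commit() rather than inside the per-object try block. A minimal sketch (assuming the same Flask-SQLAlchemy db and Logger objects as above) that forces errors to appear where the loop expects them:

    try:
        db.session.add(o)
        db.session.flush()  # emit the INSERT now so constraint errors raise here
        successful = True
    except Exception as e:
        Logger.info(e)
        db.session.rollback()
        successful = False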
Example #2
    def load_data():
        if config.Server.Settings.load_gameobjects:
            WorldLoader.load_gameobjects()
        else:
            Logger.info('Skipped game object loading.')

        if config.Server.Settings.load_creatures:
            WorldLoader.load_creature_loot_templates()
            WorldLoader.load_creatures()
            WorldLoader.load_creature_quests()
            WorldLoader.load_creature_involved_quests()
        else:
            Logger.info('Skipped creature loading.')

        WorldLoader.load_item_templates()
        WorldLoader.load_quests()
        WorldLoader.load_spells()
        WorldLoader.load_skills()
        WorldLoader.load_skill_line_abilities()
        WorldLoader.load_taxi_nodes()
        WorldLoader.load_taxi_path_nodes()

        # Character related data
        WorldLoader.load_groups()
        WorldLoader.load_guilds()
Example #3
    def load_data():
        # Map tiles
        MapManager.initialize_maps()
        MapManager.initialize_area_tables()

        # Gameobject spawns
        if config.Server.Settings.load_gameobjects:
            WorldLoader.load_gameobjects()
            WorldLoader.load_gameobject_loot_templates()
        else:
            Logger.info('Skipped game object loading.')

        # Creature spawns
        if config.Server.Settings.load_creatures:
            WorldLoader.load_creature_loot_templates()
            WorldLoader.load_creatures()
            WorldLoader.load_creature_quests()
            WorldLoader.load_creature_involved_quests()
        else:
            Logger.info('Skipped creature loading.')

        WorldLoader.load_item_templates()
        WorldLoader.load_quests()
        WorldLoader.load_spells()
        WorldLoader.load_skills()
        WorldLoader.load_skill_line_abilities()
        WorldLoader.load_char_base_infos()
        WorldLoader.load_taxi_nodes()
        WorldLoader.load_taxi_path_nodes()
        WorldLoader.load_factions()
        WorldLoader.load_faction_templates()

        # Character related data
        WorldLoader.load_groups()
        WorldLoader.load_guilds()
Example #4
 def parseStringDtToDate(string_date):
     try:
         date_formate = datetime.datetime.strptime(string_date, '%Y-%m-%d')
         parsedDate = date_formate.strftime("%Y-%m-%d")
         return parsedDate
     except Exception as e:
         Logger.info(e)
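datetime.strptime raises ValueError when the string does not match the format, so the broad except above can be narrowed. A sketch of the same helper under that assumption (Logger as in the example):

    import datetime

    def parseStringDtToDate(string_date):
        try:
            # strptime raises ValueError if string_date is not in '%Y-%m-%d' form.
            return datetime.datetime.strptime(string_date, '%Y-%m-%d').strftime('%Y-%m-%d')
        except ValueError as e:
            Logger.info(e)
            return None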
Example #5
    def load_data():
        # Map tiles
        MapManager.initialize_maps()
        MapManager.initialize_area_tables()

        # Below order matters.

        # Loot related, even if not loading creatures or gameobjects, loot might be referenced.
        WorldLoader.load_gameobject_loot_templates()
        WorldLoader.load_fishing_loot_templates()
        WorldLoader.load_creature_loot_templates()
        WorldLoader.load_item_templates()
        WorldLoader.load_reference_loot_templates()
        WorldLoader.load_pickpocketing_loot_templates()
        WorldLoader.load_item_loot_templates()

        # Spells.
        WorldLoader.load_spells()
        WorldLoader.load_creature_spells()

        # Gameobject spawns
        if config.Server.Settings.load_gameobjects:
            WorldLoader.load_gameobject_quest_starters()
            WorldLoader.load_gameobject_quest_finishers()
            WorldLoader.load_gameobjects()
        else:
            Logger.info('Skipped game object loading.')

        # Creature spawns
        if config.Server.Settings.load_creatures:
            WorldLoader.load_creature_equip_templates()
            WorldLoader.load_creatures()
            WorldLoader.load_creature_on_kill_reputation()
            WorldLoader.load_creature_quest_starters()
            WorldLoader.load_creature_quest_finishers()
            WorldLoader.load_creature_display_info()
            WorldLoader.load_creature_model_info()
            WorldLoader.load_creature_families()
            WorldLoader.load_npc_gossip()
            WorldLoader.load_npc_text()
        else:
            Logger.info('Skipped creature loading.')

        WorldLoader.load_area_trigger_quest_relations()
        WorldLoader.load_quests()
        WorldLoader.load_spell_chains()
        WorldLoader.load_trainer_spells()
        WorldLoader.load_skills()
        WorldLoader.load_skill_line_abilities()
        WorldLoader.load_char_base_infos()
        WorldLoader.load_taxi_nodes()
        WorldLoader.load_taxi_path_nodes()
        WorldLoader.load_factions()
        WorldLoader.load_faction_templates()
        WorldLoader.load_locks()

        # Character related data
        WorldLoader.load_groups()
        WorldLoader.load_guilds()
Example #6
    def start():
        Logger.info('Proxy server started.')

        ThreadedProxyServer.allow_reuse_address = True
        with ThreadedProxyServer((config.Server.Connection.RealmServer.host,
                                  config.Server.Connection.RealmProxy.port), ProxyServerSessionHandler) \
                as proxy_instance:
            proxy_session_thread = threading.Thread(
                target=proxy_instance.serve_forever)
            proxy_session_thread.daemon = True
            proxy_session_thread.start()
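A note on the threading pattern used in this and the following server examples: threading.Thread expects a callable, so the bound method has to be passed itself; writing Thread(target=instance.serve_forever()) would run serve_forever() on the calling thread and block before the worker thread exists. A minimal sketch of the intended usage:

    # Pass the method, do not call it; Thread invokes it on the new thread.
    proxy_session_thread = threading.Thread(target=proxy_instance.serve_forever)
    proxy_session_thread.daemon = True  # do not keep the process alive for this thread
    proxy_session_thread.start()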
Example #7
    def start():
        Logger.info('Login server started.')

        ThreadedLoginServer.allow_reuse_address = True
        with ThreadedLoginServer((config.Server.Connection.RealmServer.host,
                                  config.Server.Connection.RealmServer.port), LoginServerSessionHandler) \
                as login_instance:
            login_session_thread = threading.Thread(
                target=login_instance.serve_forever)
            login_session_thread.daemon = True
            login_session_thread.start()
Example #8
 def start():
     ThreadedProxyServer.allow_reuse_address = True
     with ThreadedProxyServer((config.Server.Connection.RealmProxy.host,
                               config.Server.Connection.RealmProxy.port), ProxyServerSessionHandler) \
             as proxy_instance:
         Logger.success(f'Proxy server started, listening on {proxy_instance.server_address[0]}:{proxy_instance.server_address[1]}')
         try:
             proxy_session_thread = threading.Thread(target=proxy_instance.serve_forever)
             proxy_session_thread.daemon = True
             proxy_session_thread.start()
         except KeyboardInterrupt:
             Logger.info("Proxy server turned off.")
Example #9
    def _load_data():
        # TODO: Use threads to load the data more efficiently
        if config.Server.Settings.load_gameobjects:
            WorldServerSessionHandler._load_gameobjects()
        else:
            Logger.info('Skipped game object loading.')

        if config.Server.Settings.load_creatures:
            WorldServerSessionHandler._load_creatures()
        else:
            Logger.info('Skipped creature loading.')

        WorldServerSessionHandler._load_spells()
Example #10
    def start():
        Logger.success('Login server started.')

        ThreadedLoginServer.allow_reuse_address = True
        with ThreadedLoginServer((config.Server.Connection.RealmServer.host,
                                  config.Server.Connection.RealmServer.port), LoginServerSessionHandler) \
                as login_instance:
            try:
                login_session_thread = threading.Thread(target=login_instance.serve_forever)
                login_session_thread.daemon = True
                login_session_thread.start()
            except KeyboardInterrupt:
                Logger.info("Login server turned off.")
Example #11
 async def launch(self):
     try:
         self.ws = await websockets.connect(f"ws://{self.host}:{self.port}",
                                            extra_headers=self._headers())
         if self.ws.open:
             Logger.info(
                 f"Node connected with host: {self.host} and port: {self.port}."
             )
             self._manager.bot.loop.create_task(self._wait_for_ws_message())
             self.ready = True
     except OSError as error:
         Logger.error(
             f"Failed to connect to LavaLink with host: {self.host} reason: {error}"
         )
Example #12
 def start():
     ThreadedLoginServer.allow_reuse_address = True
     with ThreadedLoginServer((config.Server.Connection.RealmServer.host,
                               config.Server.Connection.RealmServer.port), LoginServerSessionHandler) \
             as login_instance:
         Logger.success(f'Login server started, listening on {login_instance.server_address[0]}:{login_instance.server_address[1]}')
         # Make sure all characters have online = 0 on realm start.
         RealmDatabaseManager.character_set_all_offline()
         try:
             login_session_thread = threading.Thread(target=login_instance.serve_forever)
             login_session_thread.daemon = True
             login_session_thread.start()
         except KeyboardInterrupt:
             Logger.info("Login server turned off.")
Example #13
    def load_data():
        if config.Server.Settings.load_gameobjects:
            WorldLoader.load_gameobjects()
        else:
            Logger.info('Skipped game object loading.')

        if config.Server.Settings.load_creatures:
            WorldLoader.load_creatures()
        else:
            Logger.info('Skipped creature loading.')

        WorldLoader.load_spells()
        WorldLoader.load_skills()
        WorldLoader.load_skill_line_abilities()
        WorldLoader.load_taxi_nodes()
        WorldLoader.load_taxi_path_nodes()
Example #14
    def load_data():
        # Map tiles
        MapManager.initialize_maps()
        MapManager.initialize_area_tables()

        # Gameobject spawns
        if config.Server.Settings.load_gameobjects:
            WorldLoader.load_gameobjects()
            WorldLoader.load_gameobject_loot_templates()
            WorldLoader.load_gameobject_quest_starters()
            WorldLoader.load_gameobject_quest_finishers()
        else:
            Logger.info('Skipped game object loading.')

        # Creature spawns
        if config.Server.Settings.load_creatures:
            WorldLoader.load_creature_loot_templates()
            WorldLoader.load_creature_equip_templates()
            WorldLoader.load_creatures()
            WorldLoader.load_creature_quest_starters()
            WorldLoader.load_creature_quest_finishers()
            WorldLoader.load_creature_display_info()
            WorldLoader.load_creature_model_info()
            WorldLoader.load_npc_gossip()
            WorldLoader.load_npc_text()
        else:
            Logger.info('Skipped creature loading.')

        WorldLoader.load_item_templates()
        WorldLoader.load_item_loot_templates()
        WorldLoader.load_quests()
        WorldLoader.load_spells()
        WorldLoader.load_spell_chains()
        WorldLoader.load_trainer_spells()
        WorldLoader.load_skills()
        WorldLoader.load_skill_line_abilities()
        WorldLoader.load_char_base_infos()
        WorldLoader.load_taxi_nodes()
        WorldLoader.load_taxi_path_nodes()
        WorldLoader.load_factions()
        WorldLoader.load_faction_templates()

        # Character related data
        WorldLoader.load_groups()
        WorldLoader.load_guilds()
Example #15
    def start():
        WorldLoader.load_data()
        Logger.success('World server started.')

        WorldServerSessionHandler.schedule_updates()

        ThreadedWorldServer.allow_reuse_address = True
        ThreadedWorldServer.timeout = 10
        with ThreadedWorldServer((config.Server.Connection.RealmServer.host,
                                  config.Server.Connection.WorldServer.port),
                                 WorldServerSessionHandler) as world_instance:
            try:
                world_session_thread = threading.Thread(
                    target=world_instance.serve_forever)
                world_session_thread.daemon = True
                world_session_thread.start()
            except KeyboardInterrupt:
                Logger.info("World server turned off.")
Example #16
def main():
    logger = Logger(name = "runner_test")
    
    service = FDTService(logger)
    
    report(service, logger)
    
    # number of FDT Java servers to create
    numServers = 3
    ids = ["request-%03d" % i for i in range(numServers)]
    
    threads = []
    for id in ids:
        runner = ThreadCaller(id, "FDT caller", service.service, logger)
        runner.start()
        threads.append(runner)
   
    # wait until all threads finish ... no matter how but must finish
    logger.info("Waiting for FDT caller threads to terminate ...")
    for t in threads:
        t.join()

    report(service, logger)

    threads = []
    
    # commenting out this section - cleaning up and restarting the
    # script should show reliable failure reporting since the ports
    # occupied from the previous script run
    for id in ids:
        wiper = ThreadCaller(id, "FDT wiper", service.cleanUp, logger)
        wiper.start()
        threads.append(wiper)

    # wait until all threads finish ... no matter how but must finish
    logger.info("Waiting for FDT wiper threads to terminate ...")
    for t in threads:
        t.join()

    report(service, logger)
Example #17
async def on_ready():
    bot.started_at = time.time()
    Logger.info(
        f"Bot ready as: {bot.user} serving in {len(bot.guilds)} guilds with {len(bot.users)} users."
    )
    Logger.info("Loading cogs...")
    for cog in bot.cog_files:
        bot.load_extension(f"cogs.{cog}")
        Logger.info(f"Loaded cog: {cog}")
    Logger.info("All cogs loaded.")
    bot.loop.create_task(status_change())
    Logger.task("Status change has started.")
    bot.loop.create_task(bot.music_manager.audio_task())
    Logger.task("Audio task has started.")
Example #18
def main():
    params = ParameterParser().get_params()
    urls_manager = URLsManager(
        params['search_terms'], 
        params['n_pics_in_batch'], 
        params['number_pictures']
    )    
    browser = BrowserWrapper(params['search_terms'], params['target_directory'])
    logger = Logger(params['log_file_directory'])

    try:
        logger.info('Started new session for search terms {}.'.format(
            ' '.join(params['search_terms'])
        ))
        if params['links_only']:
            logger.info('Using URL search-only mode.')
        elif params['pictures_only']:
            logger.info('Using picture download-only mode.')        

        while not urls_manager.total_pic_number_collected():
            collect_urls(params, urls_manager, browser, logger)            
            logger.info('Collected full batch of URLs. {} in queue.'.format(urls_manager.in_queue()))

            try:
                download_pics(params, urls_manager, browser, logger)
            except ElementDoesNotExistException:
                time.sleep(30)
                try:
                    download_pics(params, urls_manager, browser, logger)
                except ElementDoesNotExistException:
                    continue

            logger.info('Downloaded all images from current batch, collecting new URLs.')

    except KeyboardInterrupt:
        logger.warn('Keyboard interrupt, trying to make URL lists persistent...')
        urls_manager.print_url_list_sizes()
        urls_manager.write_urls()
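The nested try/except around download_pics retries exactly once after a 30-second pause; the same behaviour generalizes to a small retry helper. A hypothetical sketch using the names from the example (download_pics, ElementDoesNotExistException, time):

def download_with_retry(params, urls_manager, browser, logger, retries=1, delay=30):
    # Mirrors the nested try/except above: one initial attempt plus `retries` retries.
    for attempt in range(retries + 1):
        try:
            download_pics(params, urls_manager, browser, logger)
            return True
        except ElementDoesNotExistException:
            if attempt < retries:
                time.sleep(delay)
    return False  # caller can skip to the next batch on failure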
Example #19
def main():
    logger = Logger(name="runner_test")
    service = FDTService(logger)
    report(service, logger)

    # number of FDT Java servers to create
    numServers = 3
    ids = ["request-%03d" % i for i in range(numServers)]

    threads = []
    for id in ids:
        runner = ThreadCaller(id, "FDT caller", service.service, logger)
        runner.start()
        threads.append(runner)

    # wait until all threads finish ... no matter how but must finish
    logger.info("Waiting for FDT caller threads to terminate ...")
    for t in threads:
        t.join()

    report(service, logger)

    threads = []

    # commenting out this section - cleaning up and restarting the
    # script should show reliable failure reporting since the ports
    # occupied from the previous script run
    for id in ids:
        wiper = ThreadCaller(id, "FDT wiper", service.cleanUp, logger)
        wiper.start()
        threads.append(wiper)

    # wait until all threads finish ... no matter how but must finish
    logger.info("Waiting for FDT wiper threads to terminate ...")
    for t in threads:
        t.join()

    report(service, logger)
Example #20
    def _load_data():
        # TODO: Use threads to load the data more efficiently
        if config.Server.Settings.load_gameobjects:
            Logger.info('Loading game objects...')
            gobject_number = WorldServerSessionHandler._load_gameobjects()
            Logger.success('%u game objects successfully loaded.' %
                           gobject_number)
        else:
            Logger.info('Skipped game object loading.')

        if config.Server.Settings.load_creatures:
            Logger.info('Loading creature spawns...')
            creature_number = WorldServerSessionHandler._load_creatures()
            Logger.success('%u creature spawns successfully loaded.' %
                           creature_number)
        else:
            Logger.info('Skipped creature loading.')
Example #21
    def start():
        Logger.info('World server started.')

        Logger.info('Loading realm tables...')
        RealmDatabaseManager.load_tables()
        Logger.info('Realm tables loaded.')

        ThreadedWorldServer.allow_reuse_address = True
        with ThreadedWorldServer((config.Server.Connection.RealmServer.host, config.Server.Connection.WorldServer.port),
                                 WorldServerSessionHandler) as world_instance:
            world_session_thread = threading.Thread(target=world_instance.serve_forever)
            world_session_thread.daemon = True
            world_session_thread.start()
Example #22
torch.random.manual_seed(conf.seed)

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

dataset = Dataset(data_dir=conf.data_dir,
                  data_name=model_conf.data_name,
                  train_ratio=model_conf.train_ratio,
                  device=device)

log_dir = os.path.join('saves', conf.model)
logger = Logger(log_dir)
model_conf.save(os.path.join(logger.log_dir, 'config.json'))

eval_pos, eval_target = dataset.eval_data()
item_popularity = dataset.item_popularity
evaluator = Evaluator(eval_pos, eval_target, item_popularity, model_conf.top_k)

model_base = getattr(models, conf.model)
model = model_base(model_conf, dataset.num_users, dataset.num_items, device)

logger.info(model_conf)
logger.info(dataset)

trainer = Trainer(dataset=dataset,
                  model=model,
                  evaluator=evaluator,
                  logger=logger,
                  conf=model_conf)

trainer.train()
Example #23
    os.makedirs(exp_visual_dir)

exp_ckpt_dir = os.path.join(exp_dir, "checkpoints")
if not os.path.exists(exp_ckpt_dir):
    os.makedirs(exp_ckpt_dir)

now_str = datetime.datetime.now().__str__().replace(' ', '_')
writer_path = os.path.join(exp_visual_dir, now_str)
writer = SummaryWriter(writer_path)

logger_path = os.path.join(exp_log_dir, now_str + ".log")
logger = Logger(logger_path).get_logger()

os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

logger.info("argument parser settings: {}".format(args))

logger.info("basic configuration settings: {}".format(basic_configs))

# Part 2-4. configurations for loss function, network, and optimizer

loss_function = nn.CrossEntropyLoss()

max_val_acc = 0.0
max_val_acc_epoch = -1

network_configs = collections.OrderedDict()

network_configs['output_dim'] = 345
network_configs['n_heads'] = 8
Example #24
def __init__():

    # Set up the global logger
    log = Logger('debug', 180)

    # Parse arguments
    parser = ArgumentParser(
        "ImageCaptionLearn_py: Neural Network for Nonvisual "
        "Prediction; Bidirectional LSTM to hidden layer "
        "to softmax over (v)isual and (n)onvisual labels")
    parser.add_argument(
        "--epochs",
        type=int,
        default=20,
        help="train opt; number of times to iterate over the dataset")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=512,
        help="train opt; number of random mention pairs per batch")
    parser.add_argument("--lstm_hidden_width",
                        type=int,
                        default=200,
                        help="train opt; number of hidden units within "
                        "the LSTM cells")
    parser.add_argument("--start_hidden_width",
                        type=int,
                        default=512,
                        help="train opt; number of hidden units in the "
                        "layer after the LSTM")
    parser.add_argument("--hidden_depth",
                        type=int,
                        default=2,
                        help="train opt; number of hidden layers after the "
                        "lstm, where each is last_width/2 units wide, "
                        "starting with start_hidden_width")
    parser.add_argument("--weighted_classes",
                        action="store_true",
                        help="Whether to inversely weight the classes "
                        "in the loss")
    parser.add_argument("--learn_rate",
                        type=float,
                        default=0.001,
                        help="train opt; optimizer learning rate")
    parser.add_argument("--adam_epsilon",
                        type=float,
                        default=1e-08,
                        help="train opt; Adam optimizer epsilon value")
    parser.add_argument("--clip_norm",
                        type=float,
                        default=5.0,
                        help='train opt; global clip norm value')
    parser.add_argument(
        "--data_norm",
        action='store_true',
        help="train opt; Whether to L2-normalize the w2v word vectors")
    parser.add_argument("--lstm_input_dropout",
                        type=float,
                        default=0.5,
                        help="train opt; probability to keep lstm input nodes")
    parser.add_argument("--dropout",
                        type=float,
                        default=0.5,
                        help="train opt; probability to keep all other nodes")
    parser.add_argument(
        "--data_dir",
        required=True,
        type=lambda f: util.arg_path_exists(parser, f),
        help="Directory containing raw/, feats/, and scores/ directories")
    parser.add_argument(
        "--data",
        choices=["flickr30k", "mscoco", "coco30k", "flickr30k_v1"],
        required=True,
        help="Dataset to use")
    parser.add_argument("--split",
                        choices=["train", "dev", "test", "trainDev"],
                        required=True,
                        help="Dataset split")
    parser.add_argument("--eval_data",
                        choices=["flickr30k", "mscoco", "coco30k"],
                        help="Evaluation dataset to use")
    parser.add_argument("--eval_split",
                        choices=["train", "dev", "test", "trainDev"],
                        help="Evaluation dataset split")
    parser.add_argument(
        "--encoding_scheme",
        choices=["first_last_sentence", 'first_last_mention'],
        default="first_last_mention",
        help="train opt; specifies how lstm outputs are transformed")
    parser.add_argument("--train", action='store_true', help='Trains a model')
    parser.add_argument(
        "--activation",
        choices=['sigmoid', 'tanh', 'relu', 'leaky_relu'],
        default='relu',
        help='train opt; which nonlinear activation function to use')
    parser.add_argument("--predict",
                        action='store_true',
                        help='Predicts using pre-trained model')
    parser.add_argument("--model_file",
                        type=str,
                        help="Model file to save/load")
    parser.add_argument("--embedding_type",
                        choices=['w2v', 'glove'],
                        default='w2v',
                        help="Word embedding type to use")
    parser.add_argument(
        "--early_stopping",
        action='store_true',
        help="Whether to implement early stopping based on the "
        "evaluation performance")
    args = parser.parse_args()
    arg_dict = vars(args)

    if arg_dict['train'] and arg_dict['model_file'] is None:
        arg_dict['model_file'] = "/home/ccervan2/models/tacl201801//" + \
                                 nn_data.build_model_filename(arg_dict, "affinity_lstm")
    model_file = arg_dict['model_file']
    util.dump_args(arg_dict, log)

    # Construct data files from the root directory and filename
    data_dir = arg_dict['data_dir'] + "/"
    data_root = arg_dict['data'] + "_" + arg_dict['split']
    eval_data_root = None
    if arg_dict['train']:
        eval_data_root = arg_dict['eval_data'] + "_" + arg_dict['eval_split']
    sentence_file = data_dir + "raw/" + data_root + "_captions.txt"
    mention_idx_file = data_dir + "raw/" + data_root + "_mentions_affinity.txt"
    feature_file = data_dir + "feats/" + data_root + "_affinity_neural.feats"
    feature_meta_file = data_dir + "feats/" + data_root + "_affinity_neural_meta.json"
    box_dir = data_dir + "feats/" + arg_dict['data'] + "_boxes/" + arg_dict[
        "split"] + "/"
    mention_box_label_file = data_dir + "raw/" + data_root + "_affinity_labels.txt"
    box_category_file = None
    #if "coco" in data_root:
    #    box_category_file = data_dir + "raw/" + data_root + "_box_cats.txt"
    if eval_data_root is not None:
        eval_box_dir = data_dir + "feats/" + arg_dict[
            'eval_data'] + "_boxes/" + arg_dict["eval_split"] + "/"
        eval_sentence_file = data_dir + "raw/" + eval_data_root + "_captions.txt"
        eval_mention_idx_file = data_dir + "raw/" + eval_data_root + "_mentions_affinity.txt"
        eval_feature_file = data_dir + "feats/" + eval_data_root + "_affinity_neural.feats"
        eval_feature_meta_file = data_dir + "feats/" + eval_data_root + "_affinity_neural_meta.json"
        eval_mention_box_label_file = data_dir + "raw/" + eval_data_root + "_affinity_labels.txt"
        eval_box_category_file = None
    #    if "coco" in eval_data_root:
    #        eval_box_category_file = data_dir + "raw/" + eval_data_root + "_box_cats.txt"
    #endif

    # Load the appropriate word embeddings
    embedding_type = arg_dict['embedding_type']
    if embedding_type == 'w2v':
        log.info("Initializing word2vec")
        nn_data.init_w2v()
    elif embedding_type == 'glove':
        log.info("Initializing glove")
        nn_data.init_glove()
    #endif

    # Set the random seeds identically every run
    nn_util.set_random_seeds()

    # Set up the minimum tensorflow logging level
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Train, if training was specified
    if arg_dict['train']:
        train(encoding_scheme=arg_dict['encoding_scheme'],
              embedding_type=embedding_type,
              sentence_file=sentence_file,
              mention_idx_file=mention_idx_file,
              feature_file=feature_file,
              feature_meta_file=feature_meta_file,
              box_dir=box_dir,
              mention_box_label_file=mention_box_label_file,
              epochs=arg_dict['epochs'],
              batch_size=arg_dict['batch_size'],
              lstm_hidden_width=arg_dict['lstm_hidden_width'],
              start_hidden_width=arg_dict['start_hidden_width'],
              hidden_depth=arg_dict['hidden_depth'],
              weighted_classes=arg_dict['weighted_classes'],
              input_dropout=arg_dict['lstm_input_dropout'],
              other_dropout=arg_dict['dropout'],
              lrn_rate=arg_dict['learn_rate'],
              clip_norm=arg_dict['clip_norm'],
              data_norm=arg_dict['data_norm'],
              adam_epsilon=arg_dict['adam_epsilon'],
              activation=arg_dict['activation'],
              model_file=model_file,
              eval_sentence_file=eval_sentence_file,
              eval_mention_idx_file=eval_mention_idx_file,
              eval_feature_file=eval_feature_file,
              eval_feature_meta_file=eval_feature_meta_file,
              eval_box_dir=eval_box_dir,
              eval_mention_box_label_file=eval_mention_box_label_file,
              early_stopping=arg_dict['early_stopping'],
              box_category_file=box_category_file,
              eval_box_category_file=eval_box_category_file,
              log=log)
    elif arg_dict['predict']:
        scores_file = data_dir + "scores/" + data_root + "_affinity.scores"

        # Restore our variables
        tf.reset_default_graph()
        with tf.Session() as sess:
            saver = tf.train.import_meta_graph(model_file + ".meta")
            saver.restore(sess, model_file)

            predict(encoding_scheme=arg_dict['encoding_scheme'],
                    embedding_type=embedding_type,
                    tf_session=sess,
                    batch_size=arg_dict['batch_size'],
                    sentence_file=sentence_file,
                    mention_idx_file=mention_idx_file,
                    feature_file=feature_file,
                    feature_meta_file=feature_meta_file,
                    box_dir=box_dir,
                    mention_box_label_file=mention_box_label_file,
                    scores_file=scores_file,
                    box_category_file=box_category_file,
                    log=log)
Example #25
class SenderWindow:
    def __init__(self, sequenceN, windowSize):

        self.sequenceNoBit = sequenceN
        # sequence number of the first frame
        self.SeqFirst = 0
        self.logger = Logger("SenderWindow")
        self.maxSeq = math.pow(2, self.sequenceNoBit)  # the window may be at most half the sequence number space
        # sequence number of the last frame
        self.transmitWindow = OrderedDict()  # holds the timer and whether the frame was acked (True/False)
        self.expectedAck = 0
        self.nextPkt = 0  # used to iterate through packetList
        self.nextSeqNo = 0
        self.in_progress = 1

        if 0 < windowSize <= self.maxSeq / 2:
            self.windowSize = windowSize
        else:
            self.logger.error(
                "Window size must be greater than 0 and at most 2^(sequenceN-1)!"
            )

    def getMaxSeq(self):
        return self.maxSeq

    def isEmpty(self):
        if len(self.transmitWindow) == 0:
            return True
        return False

    def insideWindow(self, pktNo):
        if pktNo in self.transmitWindow:
            return True
        return False

    def mark_ack(self, pktNo):
        with lock:
            self.transmitWindow[int(pktNo)][1] = True

    def unacked(self, pktNo):
        if pktNo in self.transmitWindow:
            return not self.transmitWindow[pktNo][1]

    def ackRecv(self, pktNo):
        with lock:
            if self.insideWindow(pktNo):
                self.transmitWindow[pktNo][0] = None  # stop timer for packet
        if pktNo == self.expectedAck:
            self.slideWindow()
            with lock:
                if len(self.transmitWindow) == 0:
                    self.expectedAck = self.nextSeqNo
                else:

                    # the next expected ack becomes the lower edge of the window
                    self.expectedAck = list(self.transmitWindow.items())[0][0]

    def slideWindow(self):
        to_delete = []
        for k, v in self.transmitWindow.items():
            if v[0] is None and v[1]:
                to_delete.append(k)
            else:
                break
        with lock:
            for item in to_delete:
                del self.transmitWindow[item]

    def getSeqNo(self):

        with lock:
            self.transmitWindow[self.nextSeqNo] = [None, False]
        self.nextSeqNo += 1
        if self.nextSeqNo >= self.maxSeq:
            self.nextSeqNo = int(self.nextSeqNo % self.maxSeq)
        self.nextPkt += 1

    def getNextPkt(self):
        return self.nextPkt

    def startTimer(self, pktNo):  # pkt.time = the current time
        self.logger.info(f"On thread-{pktNo} function startTimer")
        if pktNo in self.transmitWindow:
            with lock:
                self.transmitWindow[pktNo][0] = time.time()

    def getStartTime(self, pktNo):
        # returns the time value recorded when the packet was sent
        return self.transmitWindow[pktNo][0]

    def restartTimer(self, pktNo):
        # pkt.time = the current time; used when the packet needs to be retransmitted
        if self.insideWindow(pktNo):
            self.transmitWindow[pktNo][0] = time.time()

    def stopTimer(self, pktNo):
        self.logger.info(f"On thread-{pktNo} function stopTimer")
        with lock:
            self.transmitWindow[pktNo][0] = None

    def full(self):
        # when sending a new packet, the maximum window size must be respected
        if len(self.transmitWindow) >= self.windowSize:
            return True
        return False

    def stopTransm(self):
        self.in_progress = 0

    def isTransmissionDone(self):
        return not self.in_progress
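The SenderWindow methods synchronize on a module-level lock that is not part of the snippet; presumably it is an ordinary threading lock shared with the receiver and timer threads, along the lines of:

import threading

# Hypothetical module-level lock assumed by SenderWindow; the snippet
# does not show where `lock` is actually defined.
lock = threading.Lock()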
Example #26
eval_type_file = abspath(
    expanduser(data_root + "cca/" + eval_prefix + "_type_" + lexical_types +
               ".csv"))
eval_scores_file = abspath(
    expanduser(data_root + "cca/" + eval_prefix + "_ccaScores.csv"))
scores_file = abspath(
    expanduser(data_root + "scores/" + eval_prefix + "_" + lexical_types +
               "Types_affinity.scores"))

if arg_dict['train']:
    type_id_dict, type_x_dict, type_y_dict = \
        load_cca_data(fit_id_file, fit_scores_file, fit_label_file, fit_type_file)

    # learn a separate curve for each lexical type
    for type in type_x_dict.keys():
        log.info('Training ' + type)
        x = np.array(type_x_dict[type]).reshape((-1, 1))
        y = np.array(type_y_dict[type])
        learner = LogisticRegression(max_iter=arg_dict['max_iter'], n_jobs=-1)
        learner.fit(x, y)
        model_file = abspath(
            expanduser(model_root + "affinity_" + type + ".model"))
        with open(model_file, 'wb') as pickle_file:
            cPickle.dump(learner, pickle_file)
        #endwith
    #endfor
#endif

if arg_dict['eval']:
    type_id_dict_eval, type_x_dict_eval, type_y_dict_eval = \
        load_cca_data(eval_id_file, eval_scores_file, eval_label_file, eval_type_file)
Example #27
    proxy_process.start()

    world_process = context.Process(
        target=WorldManager.WorldServerSessionHandler.start)
    world_process.start()

    try:
        if os.getenv('CONSOLE_MODE', config.Server.Settings.console_mode) in [
                True, 'True', 'true'
        ]:
            while input() != 'exit':
                Logger.error('Invalid command.')
        else:
            world_process.join()
    except:
        Logger.info('Shutting down the core...')

    # Send SIGTERM to processes.
    world_process.terminate()
    Logger.info('World process terminated.')
    proxy_process.terminate()
    Logger.info('Proxy process terminated.')
    login_process.terminate()
    Logger.info('Login process terminated.')

    # Release process resources.
    Logger.info('Waiting to release resources...')
    release_process(world_process)
    release_process(proxy_process)
    release_process(login_process)
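The bare except above is presumably there to catch Ctrl-C while the console loop or join() is waiting; naming the exceptions keeps genuine errors visible. A sketch of the narrower form, using the same names as the example:

    try:
        ...  # console loop / world_process.join() as above
    except (KeyboardInterrupt, SystemExit):
        # Ctrl-C or an explicit exit falls through to the shutdown sequence below;
        # other exceptions now propagate instead of being silenced.
        Logger.info('Shutting down the core...')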
Example #28
exp_ckpt_dir = os.path.join(exp_dir, "checkpoints")
if not os.path.exists(exp_ckpt_dir):
    os.makedirs(exp_ckpt_dir)

now_str = datetime.datetime.now().__str__().replace(' ', '_')
writer_path = os.path.join(exp_visual_dir, now_str)
writer = SummaryWriter(writer_path)

logger_path = os.path.join(exp_log_dir, now_str + ".log")
logger = Logger(logger_path).get_logger()

# TODO
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

logger.info("basic configuration settings: {}".format(basic_configs))
#logger.info("dataloader configuration settings: {}".format(dataloader_configs))

# Part 2-4. configurations for loss function, and optimizer

# TODO
loss_function = nn.CrossEntropyLoss()

net = models.inception_v3(num_classes=345)

logger.info("withOUT ImageNet pretraining!!!")
net = net.cuda()
# net = torch.nn.DataParallel(net, device_ids=[int(x) for x in args.gpu.split(',')]).cuda()

# optimizer = torch.optim.SGD(net.parameters(), lr=basic_configs['learning_rate'], momentum=0.9, weight_decay=5e-4)
# TODO: change to RMSProp
Example #29
exp_ckpt_dir = os.path.join(exp_dir, "checkpoints")
if not os.path.exists(exp_ckpt_dir):
    os.makedirs(exp_ckpt_dir)

now_str = datetime.datetime.now().__str__().replace(' ', '_')
writer_path = os.path.join(exp_visual_dir, now_str)
writer = SummaryWriter(writer_path)

logger_path = os.path.join(exp_log_dir, now_str + ".log")
logger = Logger(logger_path).get_logger()

# TODO
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

logger.info("basic configuration settings: {}".format(basic_configs))
#logger.info("dataloader configuration settings: {}".format(dataloader_configs))

# Part 2-4. configurations for loss function, SiamRPN++ network, and optimizer

# TODO
loss_function = nn.CrossEntropyLoss()
training_loss = AverageMeter()

net = SiamRPN()
#net = net.cuda()
net = torch.nn.DataParallel(net,
                            device_ids=[int(x)
                                        for x in args.gpu.split(',')]).cuda()

optimizer = torch.optim.SGD(net.parameters(),
Example #30
def __init__():
    # Set up the logger
    log = Logger('debug', 180)

    # Parse arguments
    parser = ArgumentParser("ImageCaptionLearn_py: Core Neural "
                            "Network classification architecture; "
                            "used for nonvis and cardinality prediction")
    parser.add_argument("--epochs",
                        type=int,
                        default=20,
                        help="train opt; number of times to "
                        "iterate over the dataset")
    parser.add_argument("--batch_size",
                        type=int,
                        default=512,
                        help="train opt; number of random mention "
                        "pairs per batch")
    parser.add_argument("--lstm_hidden_width",
                        type=int,
                        default=200,
                        help="train opt; number of hidden units "
                        "within the LSTM cells")
    parser.add_argument("--start_hidden_width",
                        type=int,
                        default=512,
                        help="train opt; number of hidden units "
                        "in the layer after the LSTM")
    parser.add_argument("--hidden_depth",
                        type=int,
                        default=2,
                        help="train opt; number of hidden layers "
                        "after the lstm, where each is "
                        "last_width/2 units wide, starting "
                        "with start_hidden_width")
    parser.add_argument("--weighted_classes",
                        action="store_true",
                        help="Whether to inversely weight the "
                        "classes in the loss")
    parser.add_argument("--learn_rate",
                        type=float,
                        default=0.001,
                        help="train opt; optimizer learning rate")
    parser.add_argument("--adam_epsilon",
                        type=float,
                        default=1e-08,
                        help="train opt; Adam optimizer epsilon value")
    parser.add_argument("--clip_norm",
                        type=float,
                        default=5.0,
                        help='train opt; global clip norm value')
    parser.add_argument(
        "--data_norm",
        action='store_true',
        help="train opt; Whether to L2-normalize the w2v word vectors")
    parser.add_argument("--lstm_input_dropout",
                        type=float,
                        default=0.5,
                        help="train opt; probability to keep lstm input nodes")
    parser.add_argument("--dropout",
                        type=float,
                        default=0.5,
                        help="train opt; probability to keep all other nodes")
    parser.add_argument(
        "--data_dir",
        required=True,
        type=lambda f: util.arg_path_exists(parser, f),
        help="Directory containing raw/, feats/, and scores/ directories")
    parser.add_argument("--data_root",
                        type=str,
                        required=True,
                        help="Data file root (eg. flickr30k_train)")
    parser.add_argument(
        "--eval_data_root",
        type=str,
        help="Data file root for eval data (eg. flickr30k_dev)")
    parser.add_argument("--train", action='store_true', help='Trains a model')
    parser.add_argument(
        "--activation",
        choices=['sigmoid', 'tanh', 'relu', 'leaky_relu'],
        default='relu',
        help='train opt; which nonlinear activation function to use')
    parser.add_argument("--predict",
                        action='store_true',
                        help='Predicts using pre-trained model')
    parser.add_argument("--model_file",
                        type=str,
                        help="Model file to save/load")
    parser.add_argument("--embedding_type",
                        choices=['w2v', 'glove'],
                        default='w2v',
                        help="Word embedding type to use")
    parser.add_argument(
        "--early_stopping",
        action='store_true',
        help="Whether to implement early stopping based on the " +
        "evaluation performance")
    parser.add_argument("--skip_epoch_eval",
                        action='store_true',
                        help='Skips evaluation each epoch during training')
    parser.add_argument("--encoding_scheme",
                        choices=['first_last_sentence', 'first_last_mention'],
                        default='first_last_mention')
    parser.add_argument("--task", required=True, choices=['nonvis', 'card'])
    args = parser.parse_args()
    arg_dict = vars(args)

    task = arg_dict['task']
    if arg_dict['train']:
        arg_dict['model_file'] = "/home/ccervan2/models/tacl201801/" + \
                                 nn_data.build_model_filename(arg_dict, task + "_lstm")
    model_file = arg_dict['model_file']
    util.dump_args(arg_dict, log)

    # Construct data files from the root directory and filename
    data_dir = arg_dict['data_dir'] + "/"
    data_root = arg_dict['data_root']
    eval_data_root = arg_dict['eval_data_root']
    sentence_file = data_dir + "raw/" + data_root + "_captions.txt"
    mention_idx_file = data_dir + "raw/" + data_root + "_mentions_" + task + ".txt"
    feature_file = data_dir + "feats/" + data_root + "_" + task + "_neural.feats"
    feature_meta_file = data_dir + "feats/" + data_root + "_" + task + "_neural_meta.json"
    if eval_data_root is not None:
        eval_sentence_file = data_dir + "raw/" + eval_data_root + "_captions.txt"
        eval_mention_idx_file = data_dir + "raw/" + eval_data_root + "_mentions_" + task + ".txt"
        eval_feature_file = data_dir + "feats/" + eval_data_root + "_" + task + "_neural.feats"
        eval_feature_meta_file = data_dir + "feats/" + eval_data_root + "_" + task + "_neural_meta.json"
    #endif

    # Load the appropriate word embeddings
    embedding_type = arg_dict['embedding_type']
    if embedding_type == 'w2v':
        log.info("Initializing word2vec")
        nn_data.init_w2v()
    elif embedding_type == 'glove':
        log.info("Initializing glove")
        nn_data.init_glove()
    #endif

    # Set the random seeds identically every run
    nn_util.set_random_seeds()

    # Set up the minimum tensorflow logging level
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Train, if training was specified
    if arg_dict['train']:
        train(task=task,
              encoding_scheme=arg_dict['encoding_scheme'],
              embedding_type=embedding_type,
              sentence_file=sentence_file,
              mention_idx_file=mention_idx_file,
              feature_file=feature_file,
              feature_meta_file=feature_meta_file,
              epochs=arg_dict['epochs'],
              batch_size=arg_dict['batch_size'],
              lstm_hidden_width=arg_dict['lstm_hidden_width'],
              start_hidden_width=arg_dict['start_hidden_width'],
              hidden_depth=arg_dict['hidden_depth'],
              weighted_classes=arg_dict['weighted_classes'],
              lstm_input_dropout=arg_dict['lstm_input_dropout'],
              dropout=arg_dict['dropout'],
              lrn_rate=arg_dict['learn_rate'],
              clip_norm=arg_dict['clip_norm'],
              data_norm=arg_dict['data_norm'],
              adam_epsilon=arg_dict['adam_epsilon'],
              activation=arg_dict['activation'],
              model_file=model_file,
              eval_sentence_file=eval_sentence_file,
              eval_mention_idx_file=eval_mention_idx_file,
              eval_feature_file=eval_feature_file,
              eval_feature_meta_file=eval_feature_meta_file,
              early_stopping=arg_dict['early_stopping'],
              log=log)
    elif arg_dict['predict']:
        scores_file = data_dir + "scores/" + data_root + "_" + task + ".scores"

        # Restore our variables
        tf.reset_default_graph()
        with tf.Session() as sess:
            saver = tf.train.import_meta_graph(model_file + ".meta")
            saver.restore(sess, model_file)

            predict(task=task,
                    encoding_scheme=arg_dict['encoding_scheme'],
                    embedding_type=embedding_type,
                    tf_session=sess,
                    batch_size=arg_dict['batch_size'],
                    sentence_file=sentence_file,
                    mention_idx_file=mention_idx_file,
                    feature_file=feature_file,
                    feature_meta_file=feature_meta_file,
                    scores_file=scores_file,
                    log=log)
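This example (like the earlier __init__ script) uses the TensorFlow 1.x graph/session API. Under TensorFlow 2 the same restore step goes through the compat layer; a minimal sketch, assuming the same model_file checkpoint:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # keep graph-mode semantics
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
    saver = tf.compat.v1.train.import_meta_graph(model_file + ".meta")
    saver.restore(sess, model_file)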
Example #31
class Pipeline(object):
    def __init__(self, hostname="scanme.nmap.org", config=None):
        self.config = Config.from_file(config) if config else Config()
        self.target = Target(hostname)
        self.tools = []
        self.outdir = self.make_outdir()
        self.assetdir = self.get_assetdir()
        self.logger = Logger(self.config, f"{self.outdir}/logs")

        self.available = self.check_dependencies()
        self.load_tools()
        self.logger.info(f"Pipeline initialized for target: {hostname}")

    # load from config
    def load_tools(self):
        missing = []
        for k, v in self.config.tools():
            tool, *options = v.split(";")
            options = ";".join(options)
            Tool = [
                T for T in TOOLS if T.__name__ == tool and T in self.available
            ]
            if len(Tool) == 0:
                missing.append(tool)
            else:
                self.add_tool(Tool.pop()(options), from_config=True)
        if len(missing) > 0:
            os.system("clear")
            [
                self.logger.error(
                    f"Skipping tool {tool} in configuration (Missing)")
                for tool in missing
            ]
            acknowledge()

    def add_tool(self, tool, from_config=False):
        self.logger.info(f"Adding {tool}")
        for t in self.tools:
            if tool == t:  # same tool with same options...
                self.logger.info(f"Duplicate")
                return
        tool.set_logger(self.logger)
        tool.set_target(self.target)
        tool.set_outdir(f"{self.outdir}/commands")
        tool.set_reportdir(f"{self.outdir}/reports")
        tool.set_assetdir(self.assetdir)
        self.tools.append(tool)
        if not from_config:
            if not hasattr(self, "tool_iota"):
                # self.tool_iota = len(self.config.tools())
                tool_indexes = [
                    int(k.split("_")[-1]) for k, v in self.config.tools()
                ]
                self.tool_iota = len(tool_indexes) and max(tool_indexes)
            self.tool_iota += 1
            setattr(self.config, f"TOOL_{self.tool_iota}", f"{repr(tool)}")
        self.logger.info(f"Success")
        return -1  # "só" ficar uniforme com o comportamento do remove_tool

    def remove_tool(self, tool):
        self.logger.info(f"Removing {tool}")
        for t in self.tools:
            if tool == t:  # same tool with same options...
                self.tools.remove(t)
                configentry = [v for k, v in self.config.tools()
                               ].index(repr(tool))
                delattr(self.config, f"{self.config.tools()[configentry][0]}")
                self.logger.info(f"Success")
                return -1  # just to exit the menu... (force a refresh)
        self.logger.info(f"Not found")

    # def update_target(self, hostname):
    # self.target.set_target(hostname)
    # need to update for each tool? maybe refactor?

    def run(self):
        if len(self.tools) == 0:
            self.logger.error("Must add tools...")
            return
        missing = missing_tool_dependencies(self.tools)
        if missing:
            self.logger.error(missing)
            self.logger.error(
                "Did not execute. Please fulfill requirements...")
            return
        sorted_tools = sortby_dependencies(self.tools)
        if not sorted_tools: return
        self.logger.debug(
            f"Tool order: {','.join([t.__class__.__name__ for t in sorted_tools])}"
        )

        outputs = {}
        reports = {}
        report_obj = None
        for tool in sorted_tools:
            out, err = tool.run(outputs)
            for p in tool.PROVIDES:
                outputs[p] = out

            if err and not tool.IGNORE_STDERR:
                self.logger.error(f"{err.decode('ascii')}\nExiting...")
                return
            else:
                self.logger.info("Output saved")
                report_obj = tool.report(reports)
                for p in tool.PROVIDES:
                    reports[p] = report_obj
        self.create_report(reports, sorted_tools)

    def create_report(self, reports, sorted_tools):
        outfile = f"{self.outdir}/reports/Report.md"
        title = f"PENSEC - Report of {self.target.hostname}"
        reportfile = MdUtils(file_name=outfile, title=title)

        # "Execute Summary"
        reportfile.new_header(level=3, title="Common Statistics")
        for tool in sorted_tools:
            tool.write_report_summary(reportfile, reports)
        # "Technical Details"
        for tool in sorted_tools:
            tool.write_report(reportfile, reports)

        reportfile.create_md_file()
        self.logger.info("Report saved in " + outfile)

    def check_dependencies(self):
        self.logger.info("Checking dependencies...")
        dependencies = TOOLS
        available = check_dependencies(dependencies, self.logger)
        if len(dependencies) != len(available):
            acknowledge()
        return available

    def make_outdir(self):
        hostname = self.target.hostname
        timestamp = re.sub(r'[/:]', '-', datetime.now().strftime('%x_%X'))
        outdir = f"{self.config.OUTPUT_DIRECTORY}/{hostname}/{timestamp}"
        os.makedirs(outdir)
        subdirs = ["logs", "commands", "reports"]
        for sd in subdirs:
            os.mkdir(f"{outdir}/{sd}")
        return outdir

    def get_assetdir(self):
        path = Path().absolute()
        return str(path) + "/assets/"

    def viewconfig(self):
        self.logger.info(f"Showing current config:")
        self.logger.debug("\n" + str(self.config))
        print("\n")
        acknowledge()

    def saveconfig(self):
        outfile = input("Config name\n>> ")
        with open(f"config/{outfile}", "w+") as f:
            f.write(str(self.config))
        self.logger.info(f"Config saved to {outfile}")

    # called on exit from main menu
    def cleanup(self):
        self.logger.end()
        from utils.Menu import Menu
        return Menu.EXIT