Exemple #1
0
    def __init__(self, **kargs):
        """Bring up the server: load configuration, open the listening
        socket, register it with epoll and attach the file logger.

        Keyword Args:
            config_path: optional path to the JSON configuration file.
        """
        self.connections = {}  # live client connections
        config_path = kargs.get("config_path")
        self.init_config(config_path)
        self.create_server()
        self.create_epoll()
        self.logger = FileLogger("LogServer").create()
Exemple #2
0
 def __init__(self):
     """Initialise crawler state: empty link/domain accumulators, a Grab
     HTTP client, the accepted status codes, zero depth limit and the
     console and file loggers."""
     self._links = []
     self._domains = []
     self._max_depth = 0
     # Responses with these statuses count as reachable.
     self._ok_codes = [200, 301, 302, 401]
     self._grab = Grab()
     self._clog = ConsoleLogger()
     self._flog = FileLogger()
Exemple #3
0
    def log_episode(run_name, output_directory, run_directory, conf_directory,
                    conf_file, max_frames, total_episodes, total_frames,
                    total_duration, total_original_rewards,
                    total_shaped_rewards, episode_frames,
                    episode_original_reward, episode_shaped_reward,
                    episode_loss, episode_duration):
        """Forward one episode's statistics to the console, file and
        Telegram logger back-ends.

        The episode and frame counters are 0-based internally, so they are
        reported 1-based (+1) to every back-end. The Telegram back-end
        additionally receives the run identity and configuration location.
        """
        # Keyword set shared by all three back-ends; previously this list
        # was copy-pasted three times, inviting drift.
        common = dict(
            output_directory=output_directory,
            run_directory=run_directory,
            max_frames=max_frames,
            total_episodes=total_episodes + 1,
            total_frames=total_frames,
            total_duration=total_duration,
            total_original_rewards=total_original_rewards,
            total_shaped_rewards=total_shaped_rewards,
            episode_frames=episode_frames + 1,
            episode_original_reward=episode_original_reward,
            episode_shaped_reward=episode_shaped_reward,
            episode_loss=episode_loss,
            episode_duration=episode_duration)

        ConsoleLogger.log_episode(**common)
        FileLogger.log_episode(**common)
        TelegramLogger.log_episode(
            run_name=run_name,
            conf_directory=conf_directory,
            conf_file=conf_file,
            **common)
Exemple #4
0
 def configure_logging(self):
     """Configure logging for the experiment.

     The master process (rank 0, or no MPI communicator at all) gets the
     full stdout/log/csv logger plus a FileLogger recording run metadata;
     all other MPI workers get a disabled logger.
     """
     if self.comm is None or self.rank == 0:
         log_path = self.get_log_path()
         formats_strs = ['stdout', 'log', 'csv']
         fmtstr = "configuring logger"
         if self.comm is not None and self.rank == 0:
             # Only tag "[master]" when actually running under MPI.
             fmtstr += " [master]"
         logger.info(fmtstr)
         logger.configure(dir_=log_path, format_strs=formats_strs)
         fmtstr = "logger configured"
         if self.comm is not None and self.rank == 0:
             fmtstr += " [master]"
         logger.info(fmtstr)
         logger.info("  directory: {}".format(log_path))
         logger.info("  output formats: {}".format(formats_strs))
         # In the same log folder, record run metadata (note, uuid, task,
         # raw args) via a FileLogger.
         file_logger = FileLogger(uuid=self.uuid,
                                  path=self.get_log_path(),
                                  file_prefix=self.name_prefix)
         file_logger.set_info('note', self.args.note)
         file_logger.set_info('uuid', self.uuid)
         file_logger.set_info('task', self.args.task)
         file_logger.set_info('args', str(self.args))
         fmtstr = "experiment configured"
         if self.comm is not None:
             fmtstr += " [{} MPI workers]".format(self.comm.Get_size())
         logger.info(fmtstr)
     else:
         # Non-master MPI worker: announce, then silence logging entirely.
         logger.info("configuring logger [worker #{}]".format(self.rank))
         logger.configure(dir_=None, format_strs=None)
         logger.set_level(logger.DISABLED)
 def __init__(self):
     """Set up an empty crawler: no links or domains yet, a fresh Grab
     client, the whitelist of acceptable HTTP statuses, no depth limit,
     and console plus file loggers."""
     self._grab = Grab()
     self._clog = ConsoleLogger()
     self._flog = FileLogger()
     self._links = []
     self._domains = []
     # Statuses treated as a successful/reachable response.
     self._ok_codes = [200, 301, 302, 401]
     self._max_depth = 0
    def _submit_job(self):
        """Create a new job directory, attach a FileLogger that watches for
        the COMPLETED marker, and launch the training script for the job."""
        self.num_jobs_running += 1
        job_id, dir_name = self._create_new_job()

        # Watch the job directory; parse_results fires when a file matching
        # *COMPLETED* appears.
        file_logger = FileLogger(action=self.parse_results,
                                 path=dir_name,
                                 pattern='*COMPLETED*')
        file_logger.start()
        self.FILE_LOGGERS[job_id] = file_logger

        # Launch the worker script as a detached process. Building the argv
        # list directly (instead of shlex-splitting a %-formatted string)
        # keeps directory names containing spaces or quotes intact.
        print("START THE JOB")
        argv = ['python', '%s/MNIST_PyT_hparams.py' % dir_name, dir_name]
        process = subprocess.Popen(argv)
Exemple #7
0
    def start(self, particles: ParticlesCells):
        """Run the pygame simulation loop until the window is closed.

        Sets up the display and helpers, removes the centre-of-mass drift
        from the particles, then repeatedly steps the physics, logs and
        redraws. On exit the final particle state is written to CSV.
        """
        clock = pygame.time.Clock()
        screen = pygame.display.set_mode(
            (self.window_width, self.window_height))

        scale = self.calculate_and_check_scale()
        painter = PygamePainter(screen, scale)
        logger = PlotLogger()
        fileLogger = FileLogger('last_particles_state', 'csv')
        calculator = Calculator(self.width,
                                self.height,
                                particles,
                                delta_time=self.delta_time,
                                cut_off_distance=self.cut_off_distance)

        # Subtract the net drift so the system stays centred on screen.
        center_of_mass_velocity = calculate_center_of_mass_velocity(particles)
        for p in particles.iterate_throw_particles():
            p.velocity.coordinates -= center_of_mass_velocity

        condition = True
        iteration_index = 1
        while condition:
            clock.tick(self.ticks_per_second)  # cap the frame rate

            # Window close ends the loop after the current iteration.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    condition = False

            # Physics step; timing details are logged every 50th iteration.
            calculator.iteration_2(particles,
                                   log_time=(iteration_index % 50 == 0))
            logger.log_particles(particles)

            painter.draw_particles(particles)

            pygame.display.update()

            # Progress heartbeat every 1000 iterations.
            if (iteration_index % 1000 == 0):
                print(f'Iteration {iteration_index} - DONE')
            iteration_index += 1

        fileLogger.log_particles(particles)  # final state -> CSV
        logger.dispose()
Exemple #8
0
    def __init__(self, config_path, log_path=None):
        """Initialise the analysis from a JSON config file.

        Args:
            config_path: path to the JSON configuration file.
            log_path: optional log destination; falls back to the config's
                'log_path' entry, then to DEFAULT_LOG_PATH.
        """
        global log
        log = bar_logger.BarLogger("Karonte", "DEBUG")

        # Use a context manager so the config file handle is closed
        # promptly instead of leaking until GC.
        with open(config_path) as cfg_file:
            self._config = json.load(cfg_file)
        self._pickle_parsers = self._config['pickle_parsers']
        self._border_bins = [str(x) for x in self._config['bin']] \
            if self._config['bin'] else []

        self._fw_path = self._config['fw_path']
        if os.path.isfile(self._fw_path):
            # A file (not a directory) means a packed image: unpack it.
            self._fw_path = unpack_firmware(self._fw_path)

        if log_path is None:
            # Prefer a truthy 'log_path' from the config, else the default.
            log_path = self._config.get('log_path') or DEFAULT_LOG_PATH

        self._klog = FileLogger(log_path)
        self._add_stats = 'true' == self._config['stats'].lower()

        log.info("Logging at: %s" % log_path)
        log.info("Firmware directory: %s" % self._fw_path)
# Calculate ler every [num_steps] batch
num_steps = 20

# Directories for training, dev and log from conf.json
Dev_DIR = c.LSTM.DEV_PATH
Train_DIR = c.LSTM.TRAIN_PATH
Log_DIR = c.LSTM.LOG_PATH

# Validation list and val_batch_size
# NOTE(review): shuffle_every_epoch presumably returns the shuffled list of
# dev utterances for this epoch -- confirm against its definition.
dev_list = shuffle_every_epoch(Dev_DIR)
dev_size = len(dev_list)

# File log
# Per-batch TSV log: one row per training batch.
file_logger_batch = FileLogger(
    'out_batch.tsv',
    ['curr_epoch', 'batch', 'train_cost', 'train_ler', 'original', 'decode'])

# Per-epoch TSV log: one row per epoch with train and validation metrics.
file_logger_epoch = FileLogger('out_epoch.tsv', [
    'curr_epoch', 'train_cost', 'train_ler', 'val_cost', 'val_ler',
    'val_original', 'val_decoded'
])

graph = tf.Graph()
with graph.as_default():
    # Has size [batch_size, max_step_size, num_features], but the
    # batch_size and max_step_size can vary along each step
    inputs = tf.placeholder(tf.float32, [None, None, num_features],
                            name='InputData')
    # Here we use sparse_placeholder that will generate a
    # SparseTensor required by ctc_loss op.
Exemple #10
0
# -*- coding: utf-8 -*-

import socket
import json
import time
import struct
import fcntl
import os

try:
    from file_logger import FileLogger
    logger = FileLogger("LogClient").create()
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
    # no longer swallowed; import or logger-creation failures still fall
    # through to the no-op stub below.
    # NOTE(review): this stub is a plain function and does not match the
    # real logger's interface -- callers invoking logger methods will
    # still fail; confirm intended fallback behaviour.
    def logger():
        pass


class Client:
    """Minimal TCP client that connects on construction.

    Messages use little-endian struct framing (see self.byte_order).
    """

    def __init__(self, host=None, port=None, nonblock=True):
        """Connect to host:port (defaults 127.0.0.1:6103).

        Bug fix: the original only assigned self.host / self.port when the
        corresponding argument was None, so passing explicit values left
        the attributes unset and create_sock crashed with AttributeError.
        """
        self.host = "127.0.0.1" if host is None else host
        self.port = 6103 if port is None else port
        self.nonblock = nonblock
        self.byte_order = "<"  # little endian
        self.create_sock()

    def create_sock(self):
        """Open a TCP socket and connect to the configured endpoint."""
        # NOTE(review): the nonblock flag is stored but not applied here --
        # presumably used elsewhere; confirm.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
Exemple #11
0
# a-z (26 labels) plus two extra classes -- presumably space and the CTC
# blank label; confirm against the decoder.
num_classes = ord('z') - ord('a') + 1 + 1 + 1

# Hyper-parameters
num_epochs = 1
num_hidden = 256
batch_size = 346

num_examples = 1
num_batches_per_epoch = 10

# NOTE(review): audio_dir=None presumably means "serve from the cache
# directory only" -- confirm against AudioReader's definition.
audio = AudioReader(audio_dir=None,
                    cache_dir='test_1_cache',
                    sample_rate=sample_rate)

# TSV log: one row of train/validation metrics per epoch.
file_logger = FileLogger(
    'out_test.tsv',
    ['curr_epoch', 'train_cost', 'train_ler', 'val_cost', 'val_ler'])


def next_batch(bs=batch_size, train=True):
    x_batch = []
    y_batch = []
    seq_len_batch = []
    original_batch = []
    i = 0
    for k in range(bs):
        ut_length_dict = dict([(k, len(v['target']))
                               for (k, v) in audio.cache.items()])
        utterances = sorted(ut_length_dict.items(), key=operator.itemgetter(0))
        test_index = 346
        if train:
Exemple #12
0
    def log_parameters(
            run_name, output_directory, run_directory, conf_directory,
            conf_file, environment_id, batch_size, learning_rate, gamma,
            eps_start, eps_end, eps_decay, num_atoms, vmin, vmax, eta, beta,
            lambda1, normalize_shaped_reward, reward_shaping_dropout_rate,
            target_update_rate, model_save_rate, episode_log_rate,
            replay_memory_size, num_frames,
            reward_pong_player_racket_hits_ball,
            reward_pong_player_racket_covers_ball,
            reward_pong_player_racket_close_to_ball_linear,
            reward_pong_player_racket_close_to_ball_quadratic,
            reward_pong_opponent_racket_hits_ball,
            reward_pong_opponent_racket_covers_ball,
            reward_pong_opponent_racket_close_to_ball_linear,
            reward_pong_opponent_racket_close_to_ball_quadratic,
            reward_breakout_player_racket_hits_ball,
            reward_breakout_player_racket_covers_ball,
            reward_breakout_player_racket_close_to_ball_linear,
            reward_breakout_player_racket_close_to_ball_quadratic,
            reward_breakout_ball_hitting_upper_block,
            reward_space_invaders_player_avoids_line_of_fire,
            reward_freeway_distance_walked, reward_freeway_distance_to_car,
            reward_ms_pacman_far_from_enemy, reward_potential_based):
        """Broadcast the run's hyper-parameters to the console, file and
        Telegram logger back-ends.

        The Telegram back-end additionally receives the configuration
        directory and file name.
        """
        # Keyword set shared by all three back-ends; previously this ~45-
        # entry list was copy-pasted three times, inviting drift.
        common = dict(
            run_name=run_name,
            output_directory=output_directory,
            run_directory=run_directory,
            environment_id=environment_id,
            batch_size=batch_size,
            learning_rate=learning_rate,
            gamma=gamma,
            eps_start=eps_start,
            eps_end=eps_end,
            eps_decay=eps_decay,
            num_atoms=num_atoms,
            vmin=vmin,
            vmax=vmax,
            eta=eta,
            beta=beta,
            lambda1=lambda1,
            normalize_shaped_reward=normalize_shaped_reward,
            reward_shaping_dropout_rate=reward_shaping_dropout_rate,
            target_update_rate=target_update_rate,
            model_save_rate=model_save_rate,
            episode_log_rate=episode_log_rate,
            replay_memory_size=replay_memory_size,
            num_frames=num_frames,
            reward_pong_player_racket_hits_ball=(
                reward_pong_player_racket_hits_ball),
            reward_pong_player_racket_covers_ball=(
                reward_pong_player_racket_covers_ball),
            reward_pong_player_racket_close_to_ball_linear=(
                reward_pong_player_racket_close_to_ball_linear),
            reward_pong_player_racket_close_to_ball_quadratic=(
                reward_pong_player_racket_close_to_ball_quadratic),
            reward_pong_opponent_racket_hits_ball=(
                reward_pong_opponent_racket_hits_ball),
            reward_pong_opponent_racket_covers_ball=(
                reward_pong_opponent_racket_covers_ball),
            reward_pong_opponent_racket_close_to_ball_linear=(
                reward_pong_opponent_racket_close_to_ball_linear),
            reward_pong_opponent_racket_close_to_ball_quadratic=(
                reward_pong_opponent_racket_close_to_ball_quadratic),
            reward_breakout_player_racket_hits_ball=(
                reward_breakout_player_racket_hits_ball),
            reward_breakout_player_racket_covers_ball=(
                reward_breakout_player_racket_covers_ball),
            reward_breakout_player_racket_close_to_ball_linear=(
                reward_breakout_player_racket_close_to_ball_linear),
            reward_breakout_player_racket_close_to_ball_quadratic=(
                reward_breakout_player_racket_close_to_ball_quadratic),
            reward_breakout_ball_hitting_upper_block=(
                reward_breakout_ball_hitting_upper_block),
            reward_space_invaders_player_avoids_line_of_fire=(
                reward_space_invaders_player_avoids_line_of_fire),
            reward_freeway_distance_walked=reward_freeway_distance_walked,
            reward_freeway_distance_to_car=reward_freeway_distance_to_car,
            reward_ms_pacman_far_from_enemy=reward_ms_pacman_far_from_enemy,
            reward_potential_based=reward_potential_based)

        ConsoleLogger.log_parameters(**common)
        FileLogger.log_parameters(**common)
        TelegramLogger.log_parameters(
            conf_directory=conf_directory,
            conf_file=conf_file,
            **common)
Exemple #13
0
# Calculate ler every [num_steps] batch
num_steps = 20

# Directories for training, dev and log from conf.json
Dev_DIR = c.LSTM.DEV_PATH
Train_DIR = c.LSTM.TRAIN_PATH
Log_DIR = c.LSTM.LOG_PATH

# Validation list and val_batch_size
# NOTE(review): shuffle_every_epoch presumably returns the shuffled list of
# dev utterances for this epoch -- confirm against its definition.
dev_list = shuffle_every_epoch(Dev_DIR)
dev_size = len(dev_list)

# File log
# Per-batch TSV log: one row per training batch.
file_logger_batch = FileLogger('out_batch.tsv', ['curr_epoch',
                                                 'batch',
                                                 'train_cost',
                                                 'train_ler',
                                                 'original',
                                                 'decode'])

# Per-epoch TSV log: one row per epoch with train and validation metrics.
file_logger_epoch = FileLogger('out_epoch.tsv', ['curr_epoch',
                                                 'train_cost',
                                                 'train_ler',
                                                 'val_cost',
                                                 'val_ler',
                                                 'val_original',
                                                 'val_decoded'])

graph = tf.Graph()
with graph.as_default():
    # Has size [batch_size, max_step_size, num_features], but the
    # batch_size and max_step_size can vary along each step
Exemple #14
0
    def on_epoch_end(self, epoch, logs=None):
        """Keras callback hook: append this epoch's metrics as one row of
        the global file_logger.

        Bug fix: the default was a mutable `logs={}`, which is shared
        across calls; use None and substitute an empty dict locally.
        """
        logs = logs if logs is not None else {}
        file_logger.write([str(epoch),
                           str(logs['loss']),
                           str(logs['val_loss']),
                           str(logs['acc']),
                           str(logs['val_acc'])])


if __name__ == '__main__':
    # Default model; may be overridden by the first CLI argument.
    model_name = 'm11'

    args = sys.argv
    if len(args) == 2:
        model_name = args[1].lower()
    print('Model selected:', model_name)
    # TSV training log named after the selected model.
    file_logger = FileLogger('out_{}.tsv'.format(model_name), ['step', 'tr_loss', 'te_loss',
                                                               'tr_acc', 'te_acc'])
    model = None
    num_classes = 5
    # Dispatch to the constructor matching the requested model name.
    if model_name == 'm3':
        model = m3(num_classes=num_classes)
    elif model_name == 'm5':
        model = m5(num_classes=num_classes)
    elif model_name == 'm11':
        model = m11(num_classes=num_classes)
    elif model_name == 'm18':
        model = m18(num_classes=num_classes)
    elif model_name == 'm34':
        model = resnet_34(num_classes=num_classes)

    #model = m_rec(num_classes = num_classes)
Exemple #15
0
class Server:
    """Single-threaded epoll-based TCP server.

    Configuration (HOST, PORT, MAX_CONN, MAX_DATA_SIZE) is loaded from a
    JSON file and exposed as a DotMap on self.config. Client connections
    are tracked in self.connections as fileno -> [conn, addr].
    """

    def __init__(self, **kargs):
        """Load config, open the listening socket, set up epoll and the
        file logger.

        Keyword Args:
            config_path: optional path to the JSON config file; defaults
                to ./config.json (see init_config).
        """
        config_path = kargs.get("config_path")
        self.connections = {}
        self.init_config(config_path)
        self.create_server()
        self.create_epoll()
        self.logger = FileLogger("LogServer").create()

    def init_config(self, config_path=None):
        """Read the JSON configuration (default ./config.json) into a
        DotMap on self.config."""
        if config_path is None:
            config_path = os.path.join(os.path.curdir, "config.json")
        with open(config_path) as f:
            # json.load reads straight from the file object; no need to
            # slurp the whole file into a string first.
            self.config = dotmap.DotMap(json.load(f))

    def set_signal(self):
        # Placeholder: signal handling not implemented yet.
        pass

    def create_server(self):
        """Open the non-blocking, reusable listening socket described by
        the configuration."""
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.bind((self.config.HOST, self.config.PORT))
        self.server.listen(self.config.MAX_CONN)
        self.server.setblocking(False)
        self.server.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

    def create_epoll(self):
        """Create an epoll instance and register the listening socket for
        read readiness."""
        self.epoll = select.epoll()
        self.epoll.register(self.server.fileno(), select.EPOLLIN)

    def try_start(self):
        """Run the event loop; on any error, tear down epoll and the
        listening socket."""
        try:
            self.start()
        except Exception as ex:
            print("try_start exception: {}".format(ex))
            self.epoll.unregister(self.server.fileno())
            self.epoll.close()
            self.server.close()

    def start(self):
        """Event loop: accept new clients and dispatch epoll events until
        interrupted."""
        self.logger.info("start server on port: {}".format(self.config.PORT))
        while True:
            time.sleep(1)  # throttle: at most one poll pass per second
            events = self.epoll.poll(1)
            for fileno, event in events:
                if fileno == self.server.fileno():
                    # New client on the listening socket.
                    conn, addr = self.server.accept()
                    conn.setblocking(False)
                    self.epoll.register(conn.fileno(), select.EPOLLIN)
                    self.connections[conn.fileno()] = [conn, addr]
                elif event & select.EPOLLIN:
                    # Client data ready to read.
                    data = self.connections[fileno][0].recv(
                        self.config.MAX_DATA_SIZE)
                    self.deal_with_input(data, fileno)
                elif event & select.EPOLLOUT:
                    # Writable: stop watching and shut the connection down.
                    self.epoll.modify(fileno, 0)
                    self.connections[fileno][0].shutdown(socket.SHUT_RDWR)
                elif event & select.EPOLLHUP:
                    # Peer hung up: unregister and drop the connection.
                    self.epoll.unregister(fileno)
                    self.connections[fileno][0].close()
                    del self.connections[fileno]

    def deal_with_input(self, data, fileno):
        """Print the received payload and arm the connection for the
        shutdown path (EPOLLOUT)."""
        self.epoll.modify(fileno, select.EPOLLOUT)
        print(data)
Exemple #16
0
def train(args):
    """Train the neural network. Write out model every several iterations. 
    
    Args:
      workspace: str, path of workspace. 
      tr_snr: float, training SNR. 
      te_snr: float, testing SNR. 
      lr: float, learning rate. 
    """
    class MetricsHistory(Callback):
        def on_epoch_end(self, epoch, logs={}):
            file_logger.write([str(epoch),
                           str(logs['loss']),
                           str(logs['val_loss'])
                           ])
    
    
    
    print(args)
    workspace = args.workspace

    #tr_snr = args.tr_snr
    #te_snr = args.te_snr
    lr = args.lr
    #TF = args.TF
    model_name = args.model_name
    #model_save_dir = os.path.join(args.workspace, 'saved_models')
    
    # Load data
    t1 = time.time()
    print("Loading the train and vallidation dataset")
    tr_hdf5_path = os.path.join(workspace, "packed_features", "train", "mag.h5")
    te_hdf5_path = os.path.join(workspace, "packed_features", "val", "mag.h5")
    (tr_x, tr_y) = pp_data.load_hdf5(tr_hdf5_path)
    (te_x, te_y) = pp_data.load_hdf5(te_hdf5_path)
    
    print('train_x shape:')
    print(tr_x.shape, tr_y.shape)
    print('test_x shape:')
    print(te_x.shape, te_y.shape)
    print("Load data time: %f s" % (time.time() - t1))
    print('\n')
    
    # Scale data
    if True:
        print("Scaling train and test dataset. This will take some time, please wait patiently...")
        t1 = time.time()
        scaler_path = os.path.join(workspace, "packed_features", "train", "mag_scaler.p")
        scaler = pickle.load(open(scaler_path, 'rb'))
        tr_x = pp_data.scale_on_3d(tr_x, scaler)
        tr_y = pp_data.scale_on_2d(tr_y, scaler)
        te_x = pp_data.scale_on_3d(te_x, scaler)
        te_y = pp_data.scale_on_2d(te_y, scaler)
        print("Scale data time: %f s" % (time.time() - t1))
        
    # Debug plot. 
    if False:
        plt.matshow(tr_x[0 : 1000, 0, :].T, origin='lower', aspect='auto', cmap='jet')
        plt.show()
        #time.sleep(secs)
        os.system("pause")
        
    # Build model
    batch_size = 150
    epoch = 100
    print("The neural networks you have chosed is %s" % model_name)
    print("The training batch is set to %d and the %s will be training for at most %d epoches" % (batch_size, model_name.upper(), epoch))
    print("======iteration of one epoch======" )
    iter_each_epoch = int(tr_x.shape[0] / batch_size)
    #val_each_epoch = int(te_x.shape[0] / batch_size)
    #print("There are %d iterations / epoch" % int(tr_x.shape[0] / batch_size))
    print("There are %d iterations / epoch" % iter_each_epoch)
    
    log_save_dir = os.path.join(workspace, 'log')
    if not os.path.isdir(log_save_dir):
        os.makedirs(log_save_dir)
    log_path = os.path.join(log_save_dir, 'out_{}.csv'.format(model_name))
    #log_path = os.path.join(log_save_dir, 'out_%ddb_%s.csv' %(int(snr[0]), model_name))
    file_logger = FileLogger(log_path, ['epoch', 'train_loss', 'val_loss'])
    
    (_, n_concat, n_freq) = tr_x.shape
    #temp_tr_x = tr_x[:, 3, :][:, np.newaxis, :]
    #print(temp_tr_x.shape)
    #np.axis
    n_hid = 2048
    
    #data_gen = DataGenerator(batch_size=batch_size, type='train')
    #tr_gen = data_gen.generate(xs=[tr_x], ys=[tr_y])
    #te_gen = data_gen.generate(xs=[te_x], ys=[te_y])
    #temp_tr_x = tr_gen[:, 3, :][:, np.newaxis, :]
    
    
    '''
    model = Sequential()
    model.add(Flatten(input_shape=(n_concat, n_freq)))
    model.add(BatchNormalization())
    model.add(Dense(n_hid, activation='relu', kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(n_hid, activation='relu', kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(n_hid, activation='relu', kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(Dropout(0.2))
    model.add(Dense(n_freq, activation='linear'))
    #model.summary()
    '''
    
    
    print('Model selected:', model_name.lower())
    if model_name == 'dnn':
        model = dnn(n_hid, n_concat, n_freq)
    
    elif model_name == 'sdnn1':
        model = sdnn1(n_hid, n_concat, n_freq)
        
    
    elif model_name == 'sdnn2':
        model = sdnn2(n_hid, n_concat, n_freq)
    
    elif model_name == 'sdnn3':
        model = sdnn3(n_hid, n_concat, n_freq)
    
    elif model_name == 'fcn':
        model = fcn(n_concat, n_freq)
        
    elif model_name == 'fcn1':
        model = fcn1(n_concat, n_freq)
        
    elif model_name == 'fcn1':
        model = fcn1_re(n_concat, n_freq)
    
    elif model_name == 'fcn2':
        model = fcn2(n_concat, n_freq)
        
    elif model_name == 'fcn3':
        model = fcn3(n_concat, n_freq)
        
    elif model_name == 'fcn4':
        model = fcn4(n_concat, n_freq)
        
    elif model_name == 'm_vgg':
        model = m_vgg(n_concat, n_freq)
        
    elif model_name == 'm_vgg1':
        model = m_vgg1(n_concat, n_freq)
        
    elif model_name == 'm_vgg2':
        model = m_vgg2(n_concat, n_freq)
        
    elif model_name == 'm_vgg3':
        model = m_vgg3(n_concat, n_freq)
        
    elif model_name == 'm_vgg4':
        model = m_vgg3(n_concat, n_freq)
        
    elif model_name == 'CapsNet':
        model = CapsNet(n_concat, n_freq, 3)
        
    elif model_name == 'brnn' :
        recur_layers = 7
        unit = 256
        output_dim = n_freq
        model = brnn(n_concat, n_freq, unit, recur_layers, output_dim)
        
    elif model_name == 'rnn' :
        output_dim = n_freq
        model = rnn(n_concat, n_freq, output_dim)
        
    elif model_name == 'tcn' :
        input_dim = n_freq
        model = tcn(n_concat, input_dim)
        
    if model is None:
        exit('Please choose a valid model: [dnn, sdnn, sdnn1, cnn, scnn1]')
        
   
    #mean_squared_error
    model.compile(loss = 'mean_squared_error',
                  optimizer=Adam(lr=lr))
    
    print(model.summary())
    #plot model
    #plot_model(model, to_file=args.save_dir+'/model.png', show_shapes=True)
    #plot_model(model, to_file='%s/%s_model.png' % (log_save_dir, model_name), show_shapes=True)
    # Save model and weights
    model_save_dir = os.path.join(workspace, 'saved_models', "%s" % model_name)
    model_save_name = "weights-checkpoint-{epoch:02d}-{val_loss:.2f}.h5"
    if not os.path.isdir(model_save_dir):
        os.makedirs(model_save_dir)
    model_path = os.path.join(model_save_dir, model_save_name)
    checkpoint = ModelCheckpoint(model_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    print('Saved trained model at %s' % model_save_dir)
    
    
    #reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4, min_lr=0.00001, verbose=1)
    lr_decay = LearningRateScheduler(schedule=lambda epoch: lr * (0.9 ** epoch))
    metrics_history = MetricsHistory()
    
    hist = model.fit(x=tr_x,
                     y=tr_y,
                     batch_size=batch_size,
                     epochs=epoch,
                     verbose=1,
                     shuffle=True,
                     validation_data=(te_x, te_y),
                     #validation_split=0.1,
                     callbacks=[metrics_history, checkpoint, lr_decay])
    '''
    hist = model.fit_generator(tr_gen, 
                               steps_per_epoch=iter_each_epoch, 
                               epochs=epoch, 
                               verbose=1, 
                               validation_data=te_gen, 
                               validation_steps=val_each_epoch, 
                               callbacks=[metrics_history, checkpoint, reduce_lr])

    '''
    
    print(hist.history.keys())
    
    # list all data in history
    #print(hist.history.keys())
    '''
    # summarize history for accuracy
    plt.plot(hist.history['acc'])
    plt.plot(hist.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    '''
    # summarize history for loss
    model_png = "train_test_loss"
    loss_fig_dir = os.path.join(log_save_dir, '%s_%s.png' % (model_name, model_png))
    plt.plot(hist.history['loss'])
    plt.plot(hist.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper right')
    plt.savefig(loss_fig_dir)
    #plt.show()
    
    
    
    '''
    fig = plt.gcf()
    plt.show()
    fig.savefig('tessstttyyy.png', dpi=100)
    '''
    
    file_logger.close()
    
    
    
    '''
    # Data generator. 
    tr_gen = DataGenerator(batch_size=batch_size, type='train')
    eval_te_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    eval_tr_gen = DataGenerator(batch_size=batch_size, type='test', te_max_iter=100)
    
    # Directories for saving models and training stats
    model_dir = os.path.join(workspace, "models", "%ddb" % int(tr_snr))
    pp_data.create_folder(model_dir)
    
    stats_dir = os.path.join(workspace, "training_stats", "%ddb" % int(tr_snr))
    pp_data.create_folder(stats_dir)
    
    # Print loss before training. 
    iter = 0
    tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
    te_loss = eval(model, eval_te_gen, te_x, te_y)
    print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))
    
    # Save out training stats. 
    stat_dict = {'iter': iter, 
                    'tr_loss': tr_loss, 
                    'te_loss': te_loss, }
    stat_path = os.path.join(stats_dir, "%diters.p" % iter)
    cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
    
    # Train. 
    t1 = time.time()
    for (batch_x, batch_y) in tr_gen.generate(xs=[tr_x], ys=[tr_y]):
        #loss = model.train_on_batch(batch_x, batch_y)
 	if iter % 2000 == 0:
            lr *= 0.1
        model.train_on_batch(batch_x, batch_y)
        iter += 1
        
        
        # Validate and save training stats. 
        if iter % 1000 == 0:
            tr_loss = eval(model, eval_tr_gen, tr_x, tr_y)
            te_loss = eval(model, eval_te_gen, te_x, te_y)
            print("Iteration: %d, tr_loss: %f, te_loss: %f" % (iter, tr_loss, te_loss))
            
            # Save out training stats. 
            stat_dict = {'iter': iter, 
                         'tr_loss': tr_loss, 
                         'te_loss': te_loss, }
            stat_path = os.path.join(stats_dir, "%diters.p" % iter)
            cPickle.dump(stat_dict, open(stat_path, 'wb'), protocol=cPickle.HIGHEST_PROTOCOL)
            
        # Save model. 
        if iter % 5000 == 0:
            model_path = os.path.join(model_dir, "md_%diters.h5" % iter)
            model.save(model_path)
            print("Saved model to %s" % model_path)
        
        if iter == 10001:
            break
     '''     
    print("Training time: %s s" % (time.time() - t1,))
Exemple #17
0
            str(logs['val_loss']),
            str(logs['acc']),
            str(logs['val_acc'])
        ])


############# GET MODEL PARAMETERS #################
# Project helper exposing the tuned hyper-parameters for this experiment.
seth = GetValues()
model = 'DNN'      # model identifier; also used to pick the data layout below
prep = 'dev'
folds = 4
##MAKE YOUR CHANGES
#dropout=0.1
#act!=linear

# Tab-separated training log: one row per step with train/test loss and accuracy.
file_logger = FileLogger('out_{}.tsv'.format(model),
                         ['step', 'tr_loss', 'te_loss', 'tr_acc', 'te_acc'])

############# EXTRACT FEATURES #####################

# Unpack the full hyper-parameter tuple; the two '_' slots are unused here.
dropout, act1, act2,act3, input_neurons, epochs, batchsize, num_classes, _, _,\
loss,nb_filter, filter_length,pool_size, optimizer=seth.get_parameters()

############# LOAD DATA ###########################
# Pickled log-mel feature sets -- presumably (n_samples, time, mel) arrays;
# TODO confirm against GetValues.get_train_data.
tr_X, tr_y = seth.get_train_data('logmel_trainx2.cpkl')
v_X, v_y = seth.get_val_data('logmel_testx2.cpkl')

dimx = tr_X.shape[-2]
dimy = tr_X.shape[-1]
# Reshape 3-D feature blocks into the dimensionality expected by `model`.
tr_X = aud_utils.mat_3d_to_nd(model, tr_X)
v_X = aud_utils.mat_3d_to_nd(model, v_X)
class Spider:
    """Breadth-first crawler that follows links inside a set of allowed domains."""

    def __init__(self):
        self._links = []                        # BFS queue of SpiderLink items
        self._domains = []                      # substrings marking allowed domains
        self._grab = Grab()                     # HTTP client / HTML parser
        self._ok_codes = [200, 301, 302, 401]   # response codes treated as success
        self._max_depth = 0                     # 0 means: no depth limit
        self._clog = ConsoleLogger()
        self._flog = FileLogger()

    def add_url(self, url):
        """Seed the crawl queue with *url* at depth 0."""
        self._links += [SpiderLink(url, 0)]

    def add_domain(self, domain):
        """Allow crawling of URLs that contain *domain* as a substring."""
        self._domains += [domain]

    def set_max_depth(self, depth):
        """Limit crawl depth; 0 keeps the crawl unlimited."""
        self._max_depth = depth

    def start(self):
        """Run the crawl until the queue is exhausted, logging failed loads."""
        # URLs that have already been processed
        visited = set([])

        while len(self._links) > 0:
            spider_link = self._links.pop(0)
            self._clog.show_progress(spider_link.url, len(visited), len(self._links) + 1)
            if spider_link.url in visited:
                continue
            visited = visited | {spider_link.url}
            # Try to load the page
            is_loaded = self.try_load_url(spider_link.url)
            if not is_loaded:
                self._clog.add_error_message(self._grab.response.code, spider_link)
                self._flog.add_error_message(self._grab.response.code, spider_link)
                continue
            # Depth limit check (only when a limit is set)
            if 0 < self._max_depth == spider_link.depth:
                continue
            # Pages outside the allowed domains are loaded but not parsed
            in_domain = False
            for d in self._domains:
                if d in spider_link.url:
                    in_domain = True
                    break
            if not in_domain:
                continue
            # Parse the page for new links
            new_spider_links = self.get_spider_links(spider_link)
            if len(new_spider_links) == 0:
                continue
            new_spider_links = self.filter_links(new_spider_links)
            new_spider_links = self.remove_visited_links(new_spider_links, visited)
            for l in new_spider_links:
                l.parent = spider_link
            # Finally, enqueue them
            self._links += new_spider_links

    def try_load_url(self, url):
        """Fetch *url*; return True on success, False on error or bad status.

        FIX: catch Exception instead of a bare ``except`` so that
        KeyboardInterrupt / SystemExit can still abort the crawl.
        """
        try:
            self._grab.go(url)
        except Exception:
            return False
        if self._grab.response.code not in self._ok_codes:
            return False
        return True

    def get_spider_links(self, spider_link):
        """Extract all <a href> targets of the current page as SpiderLinks.

        Root-relative ("/...") hrefs are resolved against the current host,
        other relative hrefs against the current URL; children get depth + 1.
        """
        objects = self._grab.doc.select("//body//a")
        spider_links = []
        for o in objects:
            try:
                url = o.attr("href")
            except DataNotFound:
                continue
            if url.startswith("/"):
                url = "http://" + self._grab.response.url_details().hostname + url
            if not url.startswith("http"):
                ind = spider_link.url.rfind("/")
                if ind == -1:
                    url = spider_link.url + "/" + url
                else:
                    url = spider_link.url[:(ind+1)] + url
            spider_links += [SpiderLink(url, spider_link.depth + 1)]
        return spider_links

    def filter_links(self, spider_links):
        """Drop empty and fragment-only ("#") links.

        FIX: compare with ``!=`` instead of ``is not`` -- identity comparison
        against string literals only worked by interning accident and is a
        SyntaxWarning on modern CPython.
        """
        filtered = []
        for l in spider_links:
            if l.url != "" and l.url != "#":
                filtered += [l]
        return filtered

    def remove_visited_links(self, spider_links, visited_urls):
        """Return the subset of *spider_links* not already in *visited_urls*."""
        res = []
        for s in spider_links:
            if s.url in visited_urls:
                continue
            res += [s]
        return res
Exemple #19
0
class Spider:
    """Breadth-first crawler that follows links inside a set of allowed domains."""

    def __init__(self):
        self._links = []                        # BFS queue of SpiderLink items
        self._domains = []                      # substrings marking allowed domains
        self._grab = Grab()                     # HTTP client / HTML parser
        self._ok_codes = [200, 301, 302, 401]   # response codes treated as success
        self._max_depth = 0                     # 0 means: no depth limit
        self._clog = ConsoleLogger()
        self._flog = FileLogger()

    def add_url(self, url):
        """Seed the crawl queue with *url* at depth 0."""
        self._links += [SpiderLink(url, 0)]

    def add_domain(self, domain):
        """Allow crawling of URLs that contain *domain* as a substring."""
        self._domains += [domain]

    def set_max_depth(self, depth):
        """Limit crawl depth; 0 keeps the crawl unlimited."""
        self._max_depth = depth

    def start(self):
        """Run the crawl until the queue is exhausted, logging failed loads."""
        # URLs that have already been processed
        visited = set([])

        while len(self._links) > 0:
            spider_link = self._links.pop(0)
            self._clog.show_progress(spider_link.url, len(visited),
                                     len(self._links) + 1)
            if spider_link.url in visited:
                continue
            visited = visited | {spider_link.url}
            # Try to load the page
            is_loaded = self.try_load_url(spider_link.url)
            if not is_loaded:
                self._clog.add_error_message(self._grab.response.code,
                                             spider_link)
                self._flog.add_error_message(self._grab.response.code,
                                             spider_link)
                continue
            # Depth limit check (only when a limit is set)
            if 0 < self._max_depth == spider_link.depth:
                continue
            # Pages outside the allowed domains are loaded but not parsed
            in_domain = False
            for d in self._domains:
                if d in spider_link.url:
                    in_domain = True
                    break
            if not in_domain:
                continue
            # Parse the page for new links
            new_spider_links = self.get_spider_links(spider_link)
            if len(new_spider_links) == 0:
                continue
            new_spider_links = self.filter_links(new_spider_links)
            new_spider_links = self.remove_visited_links(
                new_spider_links, visited)
            for l in new_spider_links:
                l.parent = spider_link
            # Finally, enqueue them
            self._links += new_spider_links

    def try_load_url(self, url):
        """Fetch *url*; return True on success, False on error or bad status.

        FIX: catch Exception instead of a bare ``except`` so that
        KeyboardInterrupt / SystemExit can still abort the crawl.
        """
        try:
            self._grab.go(url)
        except Exception:
            return False
        if self._grab.response.code not in self._ok_codes:
            return False
        return True

    def get_spider_links(self, spider_link):
        """Extract all <a href> targets of the current page as SpiderLinks.

        Root-relative ("/...") hrefs are resolved against the current host,
        other relative hrefs against the current URL; children get depth + 1.
        """
        objects = self._grab.doc.select("//body//a")
        spider_links = []
        for o in objects:
            try:
                url = o.attr("href")
            except DataNotFound:
                continue
            if url.startswith("/"):
                url = "http://" + self._grab.response.url_details(
                ).hostname + url
            if not url.startswith("http"):
                ind = spider_link.url.rfind("/")
                if ind == -1:
                    url = spider_link.url + "/" + url
                else:
                    url = spider_link.url[:(ind + 1)] + url
            spider_links += [SpiderLink(url, spider_link.depth + 1)]
        return spider_links

    def filter_links(self, spider_links):
        """Drop empty and fragment-only ("#") links.

        FIX: compare with ``!=`` instead of ``is not`` -- identity comparison
        against string literals only worked by interning accident and is a
        SyntaxWarning on modern CPython.
        """
        filtered = []
        for l in spider_links:
            if l.url != "" and l.url != "#":
                filtered += [l]
        return filtered

    def remove_visited_links(self, spider_links, visited_urls):
        """Return the subset of *spider_links* not already in *visited_urls*."""
        res = []
        for s in spider_links:
            if s.url in visited_urls:
                continue
            res += [s]
        return res
Exemple #20
0
        else:
            print('m_net not in the checkpoint!')

# Resume bookkeeping: this run always starts from iteration 0.
start_iter=0
ctx['start_iter']=0

# loss
# NOTE(review): if ctx['loss_type'] is none of 'l2'/'mse'/'l1', loss_fn is
# never assigned and the train/eval call below raises NameError -- consider
# an explicit `else: raise ValueError(...)`.
if ctx['loss_type']=='l2' or ctx['loss_type']=='mse':
    loss_fn=nn.MSELoss(reduction='sum')
elif ctx['loss_type']=='l1':
    loss_fn=nn.L1Loss(reduction='sum')
print('loss_type',ctx['loss_type'])

# tensorboard
from logger import Logger
log_dir=join(ctx['rundir'],'logs')
print('log dir',log_dir)
ctx['tb_logger']=Logger(log_dir)

# Optional plain-file logging alongside tensorboard.
if ctx['write_file_log']:
    file_log_dir=join(ctx['rundir'],'file_logs')
    print('file log dir',file_log_dir)
    from file_logger import FileLogger
    ctx['file_logger']=FileLogger(file_log_dir)

# start train/eval
# ctx['eval'] selects a dataloader split to evaluate; 'none' means train.
if ctx['eval']=='none': # train
    train_model(dataloaders,net,opt,loss_fn,start_epoch,ctx)
else:
    eval_model(dataloaders[ctx['eval']],net,loss_fn,ctx)
Exemple #21
0
class Karonte:
    """Driver for the Karonte firmware analysis pipeline.

    Loads a JSON configuration, unpacks the target firmware if needed, and
    runs border-binary discovery, binary-dependency-graph construction and
    bug finding, logging results through a FileLogger.
    """

    def __init__(self, config_path, log_path=None):
        """Initialize from the JSON config at *config_path*.

        :param config_path: path to the Karonte JSON configuration file.
        :param log_path: optional log destination; falls back to the config's
            'log_path' entry, then to DEFAULT_LOG_PATH.
        """
        global log
        log = bar_logger.BarLogger("Karonte", "DEBUG")

        # FIX: use a context manager so the config file handle is closed
        # deterministically (the original `json.load(open(...))` leaked it).
        with open(config_path) as config_file:
            self._config = json.load(config_file)
        self._pickle_parsers = self._config['pickle_parsers']
        # Pre-selected border binaries from the config, if any.
        self._border_bins = [str(x) for x in self._config['bin']
                             ] if self._config['bin'] else []

        self._fw_path = self._config['fw_path']
        # A file (rather than a directory) is a packed firmware image.
        if os.path.isfile(self._fw_path):
            self._fw_path = unpack_firmware(self._fw_path)

        if log_path is None:
            if 'log_path' in self._config and self._config['log_path']:
                log_path = self._config['log_path']
            else:
                log_path = DEFAULT_LOG_PATH

        self._klog = FileLogger(log_path)
        # Config stores the flag as a string; only 'true' enables stats.
        self._add_stats = 'true' == self._config['stats'].lower()

        log.info("Logging at: %s" % log_path)
        log.info("Firmware directory: %s" % self._fw_path)

    def run(self, analyze_parents=True, analyze_children=True):
        """Run the full Karonte pipeline.

        :param analyze_parents: forwarded to BugFinder.
        :param analyze_children: forwarded to BugFinder.
        :return: None; results are written via the file logger.
        """

        self._klog.start_logging()

        bbf = BorderBinariesFinder(self._fw_path,
                                   use_connection_mark=False,
                                   logger_obj=log)

        # Discover border binaries unless the config pinned them explicitly.
        if not self._border_bins:
            self._border_bins = bbf.run(pickle_file=self._pickle_parsers)
            if not self._border_bins:
                log.error("No border binaries found, exiting...")
                log.info("Finished, results in %s" % self._klog.name)
                log.complete()
                self._klog.save_parser_stats(bbf)
                self._klog.close_log()
                return

        if self._add_stats:
            self._klog.save_parser_stats(bbf)

        # starting the analysis with less strings makes the analysis faster
        pf_str = BorderBinariesFinder.get_network_keywords(
            end=N_TYPE_DATA_KEYS)

        # Communication-paradigm finders used to build the dependency graph.
        cpfs = [
            environment.Environment, nvram.Nvram, file.File, socket.Socket,
            setter_getter.SetterGetter, semantic.Semantic
        ]
        bdg = BinaryDependencyGraph(self._config,
                                    self._border_bins,
                                    self._fw_path,
                                    init_data_keys=pf_str,
                                    cpfs=cpfs,
                                    logger_obj=log)
        bdg.run()
        if self._add_stats:
            self._klog.save_bdg_stats(bbf, bdg)

        bf = BugFinder(self._config,
                       bdg,
                       analyze_parents,
                       analyze_children,
                       logger_obj=log)
        bf.run(report_alert=self._klog.save_alert,
               report_stats=self._klog.save_stats if self._add_stats else None)

        # Done.
        log.info("Finished, results in %s" % self._klog.name)
        log.complete()

        if self._add_stats:
            self._klog.save_global_stats(bbf, bdg, bf)
        self._klog.close_log()
# Output alphabet size: 26 letters plus two extra classes -- presumably the
# space symbol and the CTC blank label; TODO confirm against the decoder.
num_classes = ord('z') - ord('a') + 1 + 1 + 1

# Hyper-parameters
num_epochs = 10000
num_hidden = 100
num_layers = 1
batch_size = 1

# Single-example overfitting setup: one utterance, one batch per epoch.
num_examples = 1
num_batches_per_epoch = int(num_examples / batch_size)

# VCTK corpus reader; paths and sample rate come from the project constants `c`.
audio = AudioReader(audio_dir=c.AUDIO.VCTK_CORPUS_PATH,
                    sample_rate=c.AUDIO.SAMPLE_RATE)

# Tab-separated per-epoch training log.
file_logger = FileLogger('out.tsv', [
    'curr_epoch', 'train_cost', 'train_ler', 'val_cost', 'val_ler',
    'random_shift'
])


def run_ctc():
    """Build the TensorFlow graph inputs for CTC training.

    NOTE(review): the example appears truncated here -- the graph is opened
    and placeholders declared, but no loss/optimizer/session code follows.
    `num_features` is expected to be defined at module level (not shown).
    """
    graph = tf.Graph()
    with graph.as_default():
        # e.g: log filter bank or MFCC features
        # Has size [batch_size, max_step_size, num_features], but the
        # batch_size and max_step_size can vary along each step
        inputs = tf.placeholder(tf.float32, [None, None, num_features])

        # Here we use sparse_placeholder that will generate a
        # SparseTensor required by ctc_loss op.
        targets = tf.sparse_placeholder(tf.int32)
Exemple #23
0
#!/usr/bin/env python

import time
import numpy as np
import subprocess

from file_logger import FileLogger

np.set_printoptions(precision=4)


def process(file_name):
    """Handle a newly created file by invoking the external processing script.

    Sleeps 10 s first -- presumably so the writer can finish flushing the
    file before it is processed (TODO confirm).
    """
    #	print('found new file: ', file_name)
    print('...')
    time.sleep(10)
    # SECURITY FIX: pass argv as a list with shell=False so a file name
    # containing shell metacharacters cannot inject commands; the original
    # used shell=True with %-interpolation of the untrusted name.
    subprocess.call(['./process_request.py', file_name])
    print('done')


# FileLogger here acts as a directory watcher: `process` is called for each
# new file appearing under './' -- TODO confirm against FileLogger's API.
logger = FileLogger(action=process, path='./')
# Keep the main thread alive so the watcher can keep firing callbacks.
while True:
    time.sleep(1)
def main(argv):
    """Main loop: wire up config, database, listeners, TNC pool and the API.

    Command line options:
        -c <path>  configuration file (default: ../configuration.ini)
        -v         accepted but currently unused
    """
    # Parse command line options
    opts, args = getopt(argv, "vc:")
    conf_path = None

    for opt, arg in opts:
        if opt == "-c":
            conf_path = arg

    if conf_path is None:
        # Default conf path
        conf_path = "../configuration.ini"

    # Create the configuration object
    conf = Configuration(conf_path)

    # Set up logging
    if not conf.get_conf("Client", "debug-log"):
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.DEBUG)
    _logger = logging.getLogger(__name__)

    if not os.path.isdir("../logs"):
        os.mkdir("../logs")

    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    handler = logging.FileHandler("../logs/system_logs.log")
    handler.setFormatter(formatter)

    logging.getLogger('').addHandler(handler)

    _logger.info("Using configuration from: %s", conf_path)

    # Create the database object
    db_loc = os.path.join(util.get_root(), conf.get_conf("Client", "database"))
    database = TelemetryDB(db_loc)
    database.init_db()

    # Update grafana and kaitai configurations
    if conf.get_conf("Client", "automatic-updating"):
        Updater(conf).checkForUpdates()

    # Build the other components.
    ax_listener = AXListener(conf)
    sids_relay = SIDSRelay(conf, database)
    telemetry_listener = TelemetryListener(database)
    file_logger = FileLogger(conf, conf.get_conf("Client", "logs"), "log")

    # Create the flask app and start it in a forked process.
    port = conf.get_conf("Client", "frontend-port")

    # Set the handler for SIGTERM, so we can exit a bit more gracefully.
    signal.signal(signal.SIGTERM, terminate_handler)

    # Hook the callbacks to the ax_listener.
    ax_listener.add_callback(database.insert_ax_frame)
    ax_listener.add_callback(sids_relay.relay)
    ax_listener.add_callback(file_logger.log_ax_frame)
    ax_listener.add_callback(telemetry_listener.receive)

    tnc_pool = TNCPool(conf, ax_listener)
    tnc_pool.connect_main_tnc()

    api_app = api.create_app(conf, tnc_pool, sids_relay)
    # We set the daemon option to True, so that the client will quit once the
    # other threads have finished, because we don't have a good way of
    # stopping the Flask app properly.
    api_thread = Thread(target=api_app.run, kwargs={"port": port}, daemon=True)
    api_thread.start()
    _logger.info("For the GUI open localhost:{}".format(port))

    try:
        # On Windows, the KeyboardInterrupt doesn't break the join.
        # BUG FIX: the original tested `api_thread.isAlive` without calling
        # it -- a bound method object is always truthy, so the loop could
        # never observe the thread finishing.  `is_alive()` is also the only
        # spelling available on Python >= 3.9 (`isAlive` was removed).
        if platform.system() == "Windows":
            while api_thread.is_alive():
                api_thread.join(2)
        else:
            api_thread.join()
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        tnc_pool.cleanup()
import numpy as np
from file_logger import FileLogger

# CSV log receiving one row per epoch with train/validation cost and
# label-error-rate columns.
file_logger = FileLogger(
    'result.csv',
    ['current_epoch', 'train_cost', 'train_ler', 'val_cost', 'val_ler'])


def CalEpochInterval(fileName, interval):
    import csv
    with open(fileName, newline='') as f:
        f_csv = csv.reader(f)
        index = 0
        epoch = 0
        train_cost = 0
        train_ler = 0
        val_cost = 0
        val_ler = 0
        acc = 0
        headers = next(f_csv)
        for row in f_csv:
            listline = row[0].split(' ')
            print(listline)
            train_cost = train_cost + float(listline[1])
            train_ler = train_ler + float(listline[2])
            val_cost = val_cost + float(listline[3])
            val_ler = val_ler + float(listline[4])
            if (float(listline[4]) < 0.05):
                acc = acc + 1
            index = index + 1
            if index == interval - 1: