Example #1
def run_clevr_task(agent, task_id, task_dict):
    log("")
    log("RUN_CLEVR_TASK", color="black_on_white")
    onehot_task_id = get_onehot(task_id, list(agent.tasks.keys()))
    dataset = task_dict.dataset.shuffle(10000)
    total_free_energy = 0.
    priors = []  #agent.priors
    action_index = -1
    loss = 0.
    for image_tensor, embedded_question, one_hot_answer, question, answer in dataset.take(
            task_dict.examples_per_episode):
        expanded_loss = tf.expand_dims(loss, 0)
        inputs = [
            onehot_task_id, expanded_loss, image_tensor, embedded_question
        ]
        inputs = [
            tf.cast(tf.expand_dims(input, axis=0), dtype=tf.float32)
            for input in inputs
        ]
        loss_fn = tf.keras.losses.categorical_crossentropy
        y_true = one_hot_answer
        free_energy, priors = agent.train_op(task_id, inputs, action_index,
                                             y_true, loss_fn, priors)
        total_free_energy = total_free_energy + free_energy
    return total_free_energy
Example #2
def get_user_tags(args):
    """Return the tags if the args contain the tags file

    If the user used the -t option, parse the specified file. Otherwise,
    return None
    """

    if args.tags is not None:
        return fetcher.get_user_tags(args.tags)
    else:
        log.log("No tags file provided.")
        return None
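
For context, a minimal sketch of the argument parsing that would populate args.tags and args.user (hypothetical flags; the real parser lives elsewhere in the project):

import argparse

# Hypothetical parser sketch: -t supplies the tags file, -u the user id.
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--tags", help="path to the tags file")
parser.add_argument("-u", "--user", help="Stack Overflow user id")
args = parser.parse_args()

tags = get_user_tags(args)  # returns None when -t was not given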
Example #3
def get_question_feed(url, force_reload=False):
    """Retrieve the last questions of the feed

    Returns a structure with the following format:
      [Question_1, Question_2, ...]

    where Question_n has the following keys:
      link: str
      title: str
      body: str (html)
      tags: list of str
    """

    log(bold("Fetching question feed"))
    if force_reload:
        log(fg("Force reload", magenta))
    feed = spider.get_feed(url, force_reload=force_reload)
    if feed.status == 304:  # Not Modified
        log(fg("Feed not modified since last retrieval (status 304)", magenta))
        return []
    log("Number of entries in feed: {}", fg(len(feed.entries), green))
    questions = []
    for entry in feed.entries:
        soup = BeautifulSoup(entry.summary, "html.parser")
        q = {
            "link": entry.link,
            "title": entry.title,
            "body": soup.getText(" ", strip=True),
            "tags": [x["term"] for x in entry.tags],
        }
        questions.append(q)
    return questions
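
A minimal caller sketch using only the keys documented above; the tag-feed URL follows the pattern assembled in the recommend example and is a placeholder here:

# Hypothetical usage: print a one-line summary per question from a tag feed.
url = "https://stackoverflow.com/feeds/tag?tagnames=python&sort=newest"
for q in get_question_feed(url):
    print(q["title"], "->", q["link"])
    print("  tags:", ", ".join(q["tags"]))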
Example #4
File: mol.py Project: bionicles/neuromax
def run_mol_task(agent, task_key, task_dict):
    dataset = task_dict.dataset_fn()
    log(dataset)
    model = task_dict.model
    total_free_energy = 0.
    onehot_task_key = get_onehot(task_key, list(agent.tasks.keys()))
    for id_string, n_atoms, target, positions, features, masses in dataset.take(task_dict.examples_per_episode):
        prior_code_prediction = tf.zeros(agent.compute_code_shape(task_dict))
        target_distances = get_distances(target, target)
        current = tf.concat([positions, features], -1)
        initial_loss = get_loss(target_distances, current)
        prior_loss_prediction = loss = stop = 0.
        velocities = tf.zeros_like(positions)
        forces = tf.zeros_like(positions)
        for step in range(MAX_STEPS):
            with tf.GradientTape() as tape:
                inputs = [onehot_task_key, initial_loss, current]
                normies, code, actions = model(inputs)
                code_prediction, loss_prediction, reconstructions, forces = agent.unpack_actions(task_key, actions)
                accelerations = forces / masses
                velocities = velocities + accelerations
                noise = tf.random.truncated_normal(tf.shape(positions), stddev=STDDEV)
                positions = positions + velocities + noise
                # Recompute the current state so the loss reflects the updated positions
                current = tf.concat([positions, features], -1)
                loss = get_loss(target_distances, current)
                free_energy = agent.compute_free_energy(
                    loss=loss, prior_loss_prediction=prior_loss_prediction,
                    normies=normies, reconstructions=reconstructions,
                    code=code, prior_code_prediction=prior_code_prediction,
                    actions=actions)
            gradients = tape.gradient([free_energy, model.losses],
                                      model.trainable_variables)
            agent.optimizer.apply_gradients(
                zip(gradients, model.trainable_variables))
            total_free_energy = total_free_energy + free_energy
            prior_code_prediction = code_prediction
            prior_loss_prediction = loss_prediction
            loss = tf.reduce_sum(loss)
            new_stop = loss * 1.2
            if step < 1:
                initial_loss = loss
                stop = new_stop
            if new_stop < stop:
                stop = new_stop
            elif step > 0 and (loss > stop or loss != loss):
                break
    return total_free_energy
Example #5
    async def unload_cog(self, ctx, extension_name):
        extension_name = extension_name.lower()

        if not extension_name.endswith('_cog'):
            extension_name += '_cog'

        try:
            author = ctx.message.author
            log_channel = self.bot.get_channel(cfg.LOG_CHANNEL_ID)

            self.bot.unload_extension(f'cogs.{extension_name}')
            await ctx.message.delete()
            await log_channel.send(
                f'{author.mention} has unloaded cog {extension_name}.')
        except commands.ExtensionNotLoaded:
            await ctx.send('**:x: Extension is not loaded.**')
        except Exception as e:
            log(self.bot, repr(e))
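
For reference, a hedged sketch of how such a method is usually registered in a discord.py 1.x cog; the class name and command name are placeholders:

from discord.ext import commands

class AdminCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="unload")
    async def unload_cog(self, ctx, extension_name):
        ...  # body as in the example above

def setup(bot):
    bot.add_cog(AdminCog(bot))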
Example #6
def save_config(args):
    """Store the user configuration

    Create or overwrite the configuration file with the configuration extracted
    from the options -u and -t.
    """

    with open(_config_file, "w") as fh:
        tags = get_user_tags(args)
        json.dump(
            {
                "user": args.user,
                "tags": tags,
                "model": args.model or "content_based_1"
            },
            fh,
            indent=2,
        )
        log.log("Configuration saved in {}", _config_file)
Example #7
def get(url, delay=2, use_cache=True, max_delta=td(hours=12)):
    """Respectful wrapper around requests.get"""

    useragent = "Answerable v0.1"

    # If a cached answer exists and is acceptable, then return the cached one.

    cache_file = url.replace("/", "-")
    if use_cache:
        log("Checking cache before petition {}", fg(url, yellow))
        hit, path = cache.check("spider", cache_file, max_delta)
        if hit:
            with open(path, "r") as fh:
                res = fh.read().replace("\\r\\n", "")
            return _FalseResponse(200, res)

    # If the robots.txt doesn't allow the scraping, return forbidden status
    if not ask_robots(url, useragent):
        log(fg("robots.txt forbids {}", red), url)
        return _FalseResponse(403, "robots.txt forbids it")

    # Make the request after the specified delay
    # log("[{}] {}".format(fg("{:4.2f}".format(delay), yellow), url))
    log("Waiting to ask for {}", fg(url, yellow))
    log("  in {:4.2f} seconds", delay)
    sleep(delay)
    headers = {"User-Agent": useragent}
    log("Requesting")
    res = requests.get(url, timeout=10, headers=headers)
    # Exit the program if the scraping was penalized
    if res.status_code == 429:  # too many requests
        abort("Too many requests")

    # Cache the response if allowed by user
    if use_cache:
        cache.update("spider",
                     cache_file,
                     res.content.decode(res.encoding),
                     json_format=False)

    return res
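
A minimal usage sketch; the returned object exposes status_code and content like a requests response, as the callers in the other examples assume:

# Hypothetical caller of the respectful wrapper (URL is a placeholder).
res = get("https://stackoverflow.com/questions", delay=2, use_cache=True)
if res.status_code == 200:
    html = res.content  # hand off to downstream parsing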
Example #8
def get_questions(question_ids):
    """Retrieve questions from Stack Overflow

    - question_ids: list of question IDs

    Returns a list of objects with the following attributes:
      {
        "tags": [string],
        "answers": [ {"owner": {"user_id": int}} ],
        "score": int,
        "creation_date": timestamp,
        "question_id": int,
        "link": string,
        "title": string,
        "body": string (html)
      }
    """
    # about this request: https://api.stackexchange.com/docs/questions-by-ids#page=1&pagesize=100&order=desc&sort=creation&ids=67519195&filter=!)So8N7tfWBeyaWUex((*Ndu7tpA&site=stackoverflow
    api_request_f = "https://api.stackexchange.com//2.2/questions/{}?page={}&pagesize=100&order=desc&sort=creation&site=stackoverflow&filter=!)So8N7tfWBeyaWUex((*Ndu7tpA"
    max_ids = 100  # no more than 100 ids allowed at once
    k = math.ceil(len(question_ids) / max_ids)
    log(f"{len(question_ids)} questions, {k} batches")
    questions = []
    for i in range(k):
        log(f"batch {i+1}")
        batch_begin = i * max_ids
        batch_end = i * max_ids + max_ids
        subset = ";".join(question_ids[batch_begin:batch_end])
        page = 1
        while True:
            api_request = api_request_f.format(subset, page)
            response = spider.get(api_request, delay=0.5,
                                  use_cache=False)  # urls too long to cache
            if response.status_code != 200:
                abort(response)
            result = json.loads(response.content)
            questions += result["items"]
            if not result["has_more"]:
                break
            page += 1
    return questions
Example #9
def get_QA(user_id, force_reload=False, max_page=5):
    """Retrieve information about the questions answered by the user

    Return
        [
            (Question_1, Answer_1),
            (Question_2, Answer_2),
            ...
        ]
    See
        get_questions, get_user_answers
    """

    log(bold("Fetching user information"))
    if force_reload:
        log(fg("Force reload", magenta))
    cache_file = str(user_id) + ".json"
    # Check cache
    if not force_reload:
        hit, fpath = cache.check(cache_where, cache_file, cache_threshold)
        if hit:
            with open(fpath) as fh:
                stored = json.load(fh)
            return stored
    # Get the answers
    answers = get_user_answers(user_id, force_reload, max_page)

    # Get the questions
    q_ids = [str(a["question_id"]) for a in answers]
    questions = get_questions(q_ids)

    # Join answers and questions
    user_qa = [(q, a) for q in questions for a in answers
               if q["question_id"] == a["question_id"]]
    cache.update(cache_where, cache_file, user_qa)
    for q, a in user_qa:
        a["tags"] = q["tags"]

    ## Include questions specified by user
    try:
        with open("include.txt", "r") as f:
            extra_q_ids = f.read().split()
        log("Aditional training: " + str(extra_q_ids))
        extra_questions = get_questions(extra_q_ids)
    except FileNotFoundError:
        extra_questions = []
        log("No additional training specified by user")
    user_qa += [(q, None) for q in extra_questions]

    return user_qa
Example #10
def update(category: str, _file: str, obj, json_format=True):
    """Update or create a file in the cache

    Parameters:
    category: Folder inside the cache.
    _file: File name to store in.
    obj: Serializable object to store.
    json_format: If True, store obj as JSON; otherwise write it verbatim.
    """

    subpath = pathlib.Path(category) / _file
    path = pathlib.Path.cwd() / __cache_dir / subpath
    path.parent.mkdir(parents=True, exist_ok=True)
    try:
        with open(path, "w") as fh:
            if json_format:
                json.dump(obj, fh, indent=2)
            else:
                fh.write(obj)
        log("  Cache updated: {}", fg(subpath, green))
    except OSError as err:
        log("  {}: {}", err, fg(subpath, magenta))
        return False, path
Example #11
def get_feed(url, force_reload=False):
    """Get RSS feed and optionally remember to reduce bandwith"""

    useragent = "Answerable RSS v0.1"
    log("Requesting feed {}", fg(url, yellow))
    cache_file = url.replace("/", "_")

    # Get the conditions for the GET bandwidth reduction
    etag = None
    modified = None
    if not force_reload:
        hit, path = cache.check("spider.rss", cache_file, td(days=999))
        if hit:
            with open(path, "r") as fh:
                headers = json.load(fh)
                etag = headers["etag"]
                modified = headers["modified"]
        log("with {}: {}", bold("etag"), fg(etag, yellow))
        log("with {}: {}", bold("modified"), fg(modified, yellow))

    # Get the feed
    feed = feedparser.parse(url, agent=useragent, etag=etag, modified=modified)

    # Store the etag and/or modified headers
    if feed.status != 304:
        etag = feed.etag if "etag" in feed else None
        modified = feed.modified if "modified" in feed else None
        new_headers = {
            "etag": etag,
            "modified": modified,
        }
        cache.update("spider.rss", cache_file, new_headers)
        log("Stored new {}: {}", bold("etag"), fg(etag, green))
        log("Stored new {}: {}", bold("modified"), fg(modified, green))

    return feed
Example #12
    def get_train_infos(self):
        """
        Get all train infos and insert them into the DB
        :return: None
        """
        print('service: train')
        file = 'train_infos'
        log(file=file, log_level=logging.INFO)

        logging.info('train_infos: Started')
        # Connect to the DB
        database = DB(self.db_path)
        db = DbBuilder(database)
        # Retrieve train add_on page crawled data
        J = Jourudan()
        json = J.routine()
        # Retrieve train add_on Sql
        train = SqlOrder(json)
        result = train.insert_all_train_infos()
        # Pre-set vars
        line_id = None
        station_id = None
        # Info logger counters
        max_line_id = 0
        max_start_end_stations_id = 0
        max_station_id = 0
        # Insert data in the DB by train
        for train in result:
            for table_name, arg in result[train].items():
                if table_name == 'Train_lines':
                    line_id = db.insert_in_db(table_name, arg)
                    logging.info(f'trains =>     lines: {line_id}')

                    max_line_id = self.get_max_id(line_id, max_line_id)

                elif table_name == 'Train_Start_end_stations':
                    if not line_id:
                        logging.debug(
                            f'Start_end_stations =>     line_id: {line_id}')
                    arg['line_id'] = line_id
                    station_id = db.insert_in_db(table_name, arg)
                    logging.info(
                        f'trains =>     Start_end_stations: {station_id}')

                    max_start_end_stations_id = self.get_max_id(
                        station_id, max_start_end_stations_id)

                elif table_name == 'Train_trains':
                    if not line_id:
                        logging.debug(f'trains =>     line_id: {line_id}')
                    if not station_id:
                        logging.debug(
                            f'trains =>     station_id: {station_id}')
                    arg['line_id'] = line_id
                    arg['start_end_station_id'] = station_id
                    arg['now_time'] = datetime.datetime.now(
                        pytz.timezone('Asia/Tokyo')).strftime(
                            "%Y-%m-%d %H:%M:%S")
                    trains_id = db.insert_in_db(table_name, arg)
                    logging.info(f'trains =>     station_id: {trains_id}')

                    max_station_id = self.get_max_id(trains_id, max_station_id)
        logging.info(
            f'Lines: {max_line_id},  Start_end_stations: {max_start_end_stations_id},  Stations: {max_station_id}'
        )
        logging.info('train_infos: Finished')
Example #13
def check(category: str, _file: str, max_delta: td) -> (bool, pathlib.Path):
    """Return if a file is cached and where it is located.

    Returns:
    (B, P) where
    - B is true if the content is cached and usable
    - P is the path where the cached content is/should be.

    Parameters:
    category: Folder inside the cache.
    _file: File name to look for.
    max_delta: Timedelta used as threshold to consider a file too old.
    """

    # Prepare the path to the cached file
    subpath = pathlib.Path(category) / _file
    path = pathlib.Path.cwd() / __cache_dir / subpath
    path.parent.mkdir(parents=True, exist_ok=True)

    try:
        if not path.exists():
            log("  Miss {}", fg(subpath, magenta))
            return False, path
        else:
            # Check if the file is too old
            log("  Hit {}", fg(subpath, green))
            modified = dt.fromtimestamp(path.stat().st_mtime)
            now = dt.now()
            delta = now - modified
            log("  Time passed since last fetch: {}", delta)
            valid = delta < max_delta
            if valid:
                log(fg("  Recent enough", green))
            else:
                log(fg("  Too old", magenta))
            return valid, path
    except OSError as err:
        log("  {}: {}", err, fg(subpath, magenta))
        return False, path
Example #14
from dynamics.mav_dynamics import mavDynamics
from control.servo_stimulation import servo_stimulation
import parameters.simulation_parameters as SIM
from tools.autopilot_Command import autopilotCommand
localIP = '192.168.1.237'
raspIP = '192.168.1.38'
#sensors=Sensors()
# initialize the visualization
#ground=groundProxy()
#servo=servo_stimulation()
# initialize elements of the architecture
ctrl = autopilot(0.01)
#sensor_view = SensorViewer()  # initialize view of sensor data plots
#obsv = observer(SIM.ts_simulation)
path_follow = path_follower()
logger = log('Test flight.txt')
# path definition
from message_types.msg_path import msgPath
from message_types.msg_state import msgState
from message_types.msg_delta import msgDelta
path = msgPath()
state = msgState()  # instantiate state message
delta = msgDelta()
commandWindow = autopilotCommand()
commands = msgAutopilot()
wind = windSimulation(SIM.ts_simulation)
wind._steady_state = np.array([[5., 2., 0.]]).T  # Steady wind in NED frame
mav = mavDynamics(SIM.ts_simulation, localIP, raspIP)
# path.type = 'line'
path.type = 'orbit'
if path.type == 'line':
Example #15
def recommend(args):
    """Recommend questions from the latest unanswered"""

    filtered = {"hidden": 0, "closed": 0, "duplicate": 0}

    def valid_entry(entry):
        """Check if a entry should be taken into account"""

        if len(set(entry["tags"]) & hide_tags) > 0:
            filtered["hidden"] += 1
            return False
        if entry["title"][-8:] == "[closed]":
            filtered["closed"] += 1
            return False
        if entry["title"][-11:] == "[duplicate]":
            filtered["duplicate"] += 1
            return False
        return True

    def cf(x):
        """Color a value according to its value"""

        return (displayer.fg(x, displayer.green) if x == 0 else displayer.fg(
            x, displayer.magenta))

    # Load configuration
    config = load_config(args)

    # Load the model
    try:
        model_name = config["model"]
        log.log("Loading model {}", displayer.fg(model_name, displayer.yellow))
        model = importlib.import_module(f".{model_name}", "models")
        log.log("Model {} succesfully loaded",
                displayer.fg(model_name, displayer.green))
    except ModuleNotFoundError as err:
        if err.name == f"models.{model_name}":
            log.abort("Model {} not present", model_name)
        else:
            log.abort("Model {} unsatisfied dependency: {}", model_name,
                      err.name)

    # Get user info and feed
    user_qa = fetcher.get_QA(config["user"], force_reload=args.f)
    if args.all or "tags" not in config:
        tags = ""
    else:
        tags = "tag?tagnames="
        tags += "%20or%20".join(config["tags"]["followed"]).replace("+", "%2b")
        tags += "&sort=newest"
    url = "https://stackoverflow.com/feeds/" + tags
    try:
        feed = fetcher.get_question_feed(url, force_reload=args.F)
        if len(feed) == 0:
            raise ValueError("No feed returned")
        # Filter feed from ignored tags
        hide_tags = (set() if args.all or "tags" not in config else set(
            config["tags"]["ignored"]))
        useful_feed = [e for e in feed if valid_entry(e)]
        if len(useful_feed) == 0:
            raise ValueError("All feed filtered out")
        log.log(
            "Discarded: {} ignored | {} closed | {} duplicate",
            cf(filtered["hidden"]),
            cf(filtered["closed"]),
            cf(filtered["duplicate"]),
        )

        # Make the recommendation
        log.log(f"Corpus size: {len(user_qa)} Feed size: {len(useful_feed)}")
        rec_index, info = model.recommend(user_qa, useful_feed)
        selection = [useful_feed[i] for i in rec_index[:args.limit]]
        if args.info and info is None:
            log.warn("Info requested, but model {} returns None", model_name)
        elif args.info and info is not None:
            info = [info[i] for i in rec_index[:args.limit]]
        displayer.disp_feed(selection, info, args.info)
    except ValueError as err:
        log.warn(err)
        log.print_advice()
Example #16
    )
    args = parser.parse_args()
    if args.no_ansi:
        displayer.ansi = False
    return args


if __name__ == "__main__":
    _latest_version = latest_version()
    if _latest_version is not None and _latest_version != _current_version:
        log.warn(
            f"New version on GitHub: {_latest_version} (current is {_current_version})"
        )
    switch = {
        "save": save_config,
        "summary": summary,
        "recommend": recommend,
    }
    args = parse_arguments()
    command = args.command

    log.add_log("answerable.log")
    if args.verbose:
        log.add_stderr()

    log.log(displayer.bold("Log of {}"), datetime.datetime.now())

    switch[command](args)

    log.close_logs()
Example #17
import numpy as np
import parameters.simulation_parameters as SIM

from video.mav_viewer import mavViewer
from kinematics.data_viewer import dataViewer
from dynamics.mav_dynamics import mavDynamics
from dynamics.wind_simulation import windSimulation
from tools.log import log
from message_types.msg_delta import msgDelta
from tools.joystick_Input import joystick

controlJoystick = joystick(True)
# initialize the visualization
VIDEO = False  # True==write video, False==don't write video
mav_view = mavViewer()  # initialize the mav viewer
data_view = dataViewer()  # initialize view of data plots
logFile = log('dados_log.txt')
if VIDEO is True:
    from video.video_writer import videoWriter
    video = videoWriter(video_name="chap4_video.avi",
                        bounding_box=(0, 0, 1000, 1000),
                        output_rate=SIM.ts_video)

# initialize elements of the architecture
wind = windSimulation(SIM.ts_simulation)
wind._steady_state = np.array([[5., 2., 0.]]).T  # Steady wind in NED frame
mav = mavDynamics(SIM.ts_simulation)

# initialize the simulation time
sim_time = SIM.start_time
plot_time = sim_time
Example #18
def train_model(model, train_dataset, val_dataset, learning_rate, epochs,
                layers):
    """Train the model.
    train_dataset, val_dataset: Training and validation Dataset objects.
    learning_rate: The learning rate to train with
    epochs: Number of training epochs. Note that previous training epochs
            are considered to be done already, so this actually determines
            the epochs to train in total rather than in this particular
            call.
    layers: Allows selecting which layers to train. It can be:
        - A regular expression to match layer names to train
        - One of these predefined values:
          heads: The RPN, classifier and mask heads of the network
          all: All the layers
          3+: Train Resnet stage 3 and up
          4+: Train Resnet stage 4 and up
          5+: Train Resnet stage 5 and up
    """

    # Pre-defined layer regular expressions
    layer_regex = {
        # all layers but the backbone
        "heads":
        r"(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        # From a specific Resnet stage and up
        "3+":
        r"(fpn.C3.*)|(fpn.C4.*)|(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        "4+":
        r"(fpn.C4.*)|(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        "5+":
        r"(fpn.C5.*)|(fpn.P5\_.*)|(fpn.P4\_.*)|(fpn.P3\_.*)|(fpn.P2\_.*)|(rpn.*)|(classifier.*)|(mask.*)",
        # All layers
        "all": ".*",
    }
    if layers in layer_regex.keys():
        layers = layer_regex[layers]

    # Data generators
    train_set = Dataset(train_dataset, model.config, augment=True)
    train_generator = torch.utils.data.DataLoader(train_set,
                                                  batch_size=1,
                                                  shuffle=True,
                                                  num_workers=4)
    val_set = Dataset(val_dataset, model.config, augment=True)
    val_generator = torch.utils.data.DataLoader(val_set,
                                                batch_size=1,
                                                shuffle=True,
                                                num_workers=4)

    # Train
    log("\nStarting at epoch {}. LR={}\n".format(model.epoch + 1,
                                                 learning_rate))
    log("Checkpoint Path: {}".format(model.checkpoint_path))
    set_trainable(model, layers)

    # Optimizer object
    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    trainables_wo_bn = [
        param for name, param in model.named_parameters()
        if param.requires_grad and 'bn' not in name
    ]
    trainables_only_bn = [
        param for name, param in model.named_parameters()
        if param.requires_grad and 'bn' in name
    ]
    optimizer = optim.SGD([{
        'params': trainables_wo_bn,
        'weight_decay': model.config.SOLVER.WEIGHT_DECAY
    }, {
        'params': trainables_only_bn
    }],
                          lr=learning_rate,
                          momentum=model.config.SOLVER.MOMENTUM)

    for epoch in range(model.epoch + 1, epochs + 1):
        log("Epoch {}/{}.".format(epoch, epochs))

        # Training
        loss, loss_rpn_class, loss_rpn_bbox, loss_mrcnn_class, loss_mrcnn_bbox, loss_mrcnn_mask = train_epoch(
            model, train_generator, optimizer,
            model.config.TRAIN.STEPS_PER_EPOCH)

        # Validation
        val_loss, val_loss_rpn_class, val_loss_rpn_bbox, val_loss_mrcnn_class, val_loss_mrcnn_bbox, val_loss_mrcnn_mask = valid_epoch(
            model, val_generator, model.config.TRAIN.VALIDATION_STEPS)

        # Statistics
        model.loss_history.append([
            loss, loss_rpn_class, loss_rpn_bbox, loss_mrcnn_class,
            loss_mrcnn_bbox, loss_mrcnn_mask
        ])
        model.val_loss_history.append([
            val_loss, val_loss_rpn_class, val_loss_rpn_bbox,
            val_loss_mrcnn_class, val_loss_mrcnn_bbox, val_loss_mrcnn_mask
        ])
        visualize.plot_loss(model.loss_history,
                            model.val_loss_history,
                            save=True,
                            log_dir=model.log_dir)

        # Save model
        torch.save(model.state_dict(), model.checkpoint_path.format(epoch))

    model.epoch = epochs
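
A hedged call sketch based on the docstring above; the model and dataset objects are assumed to exist already:

# Hypothetical usage: fine-tune only the network heads for 20 total epochs.
train_model(model, train_dataset, val_dataset,
            learning_rate=1e-3, epochs=20, layers="heads")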