# Example #1
import os.path

import tensorflow as tf

from src.components.networks import Networks
from src.components.placeholders import Placeholders

from src.utils.utils import get_logger

logger = get_logger("savers")


class Saver:
    def __init__(self,
                 variable_list,
                 save_path,
                 init_path=None,
                 name="Unnamed Graph"):
        """Build a tf.train.Saver over `variable_list`, checkpointing to `save_path`.

        Args:
            variable_list: variables to include in the checkpoint.
            save_path: directory for checkpoints; created if missing.
            init_path: optional path to restore initial weights from.
            name: human-readable name used in log messages.

        Construction is best-effort: any failure is logged (with traceback)
        and swallowed, leaving the instance partially initialized.
        """
        try:
            self.save_path = save_path
            # exist_ok avoids the race between a separate isdir() check
            # and the directory creation.
            os.makedirs(self.save_path, exist_ok=True)
            self.init_path = init_path
            self.name = name
            self.saver = tf.train.Saver(variable_list)
        except Exception:
            # Preserve the original swallow-and-warn behavior, but attach
            # the traceback so the failure cause is not silently lost.
            logger.warning(f"Could not create Saver for {name}!", exc_info=True)

    def load(self, session):
        try:
            self.saver.restore(session,
# Example #2
import os
from typing import List, Optional
# Cap native math libraries at one thread each; these must be set *before*
# numpy/cv2 are imported, or the limits are ignored.
os.environ["MKL_NUM_THREADS"] = "1" 
os.environ["NUMEXPR_NUM_THREADS"] = "1" 
os.environ["OMP_NUM_THREADS"] = "1"
from pathlib import Path as P
import hydra
from omegaconf import DictConfig

from src.utils import utils
log = utils.get_logger(__name__)

import numpy as np
# Fixed seeds for reproducibility (NumPy and stdlib random).
np.random.seed(1)
import random
random.seed(1)
import cv2
import timeit
from os import path, makedirs, listdir
import sys
# Raised recursion limit — presumably for deeply recursive geometry/region
# processing below; NOTE(review): confirm which routine actually needs it.
sys.setrecursionlimit(10000)
from multiprocessing import Pool

from shapely.wkt import loads
from shapely.geometry import mapping, Polygon

# import matplotlib.pyplot as plt
# import seaborn as sns

import json
# Example #3
import tensorflow as tf
from src.utils.utils import get_logger
from src.nets import ops

logger = get_logger("generator")


class Generator(object):
    def __init__(self, name, is_train, norm='instance', activation='relu', unet=False):
        """Store generator configuration; the graph itself is built lazily
        on the first __call__ under a variable scope named `name`."""
        logger.info(f"Initializing Generator {name}")
        self.name = name
        # Internal hyper-parameters; `_reuse` flips once variables exist.
        self._unet = unet
        self._reuse = False
        self._num_res_block = 10
        self._activation = activation
        self._norm = norm
        self._is_train = is_train

    def __call__(self, input, return_code_layer=False):
        with tf.variable_scope(self.name, reuse=self._reuse):
            C1 = ops.conv_block(input, 32, 'c7s1-32', 7, 1, self._is_train,
                                self._reuse, self._norm, self._activation, pad='REFLECT')
            C2 = ops.conv_block(C1, 64, 'd64', 3, 2, self._is_train,
                                self._reuse, self._norm, self._activation)
            C3 = ops.conv_block(C2, 128, 'd128', 3, 2, self._is_train,
                                self._reuse, self._norm, self._activation)

            G = C3
            for i in range(self._num_res_block):
                if i == self._num_res_block // 2 and return_code_layer:
                    return G
# Example #4
import os

from src.utils import utils
from src.lightning.model import HatefulMemesModel

LOGGER = utils.get_logger(__name__)

CHECKPOINT = os.environ.get("CHECKPOINT")
TEST_PATH = os.environ.get("TEST_PATH")


def main():
    """Restore the model from CHECKPOINT, build a submission frame for
    TEST_PATH, and log its first rows."""
    model = HatefulMemesModel.load_from_checkpoint(CHECKPOINT)
    frame = model.make_submission_frame(TEST_PATH)
    LOGGER.info(frame.head())


if __name__ == "__main__":
    main()
# Example #5
import os
from datetime import datetime

import tensorflow as tf

from src.data_loader import get_training_datasets
from src.cycle_gan import CycleGan
import src.utils.argument_parser as argument_parser
from src.utils.utils import get_logger, makedirs

logger = get_logger("main")


def is_video_data(train_A):
    """Return True if `train_A` yields 5-D batches (batch, frames, H, W, C)
    with more than one frame, i.e. video rather than still-image data.
    """
    shapes = train_A.output_shapes
    # BUG FIX: the original used `len(...) is 5` — identity comparison on an
    # int, which only worked via CPython's small-int cache. Use `==`.
    return len(shapes) == 5 and (shapes[1] > 1)


def  train(model, train_A, train_B, logdir, learning_rate):
    next_a = train_A.make_one_shot_iterator().get_next()
    next_b = train_B.make_one_shot_iterator().get_next()
    variables_to_save = tf.global_variables()
    init_op = tf.variables_initializer(variables_to_save)
    init_all_op = tf.global_variables_initializer()

    var_list_fnet = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope='fnet')
    fnet_loader = tf.train.Saver(var_list_fnet)

    summary_writer = tf.summary.FileWriter(logdir)

    def initialize_session(sess):
        logger.info('Initializing all parameters.')
# Example #6
import tensorflow as tf
from src.utils.utils import get_logger
from src.nets import ops

logger = get_logger("discriminator")


class Discriminator(object):
    def __init__(self, name, is_train, norm='instance', activation='leaky'):
        """Store discriminator configuration; layers are created on the
        first __call__ under a variable scope named `name`."""
        logger.info(f"Initializing Discriminator {name}...")
        # `_reuse` flips to True once the variables have been created.
        self._reuse = False
        self._activation = activation
        self._norm = norm
        self._is_train = is_train
        self.name = name

    def __call__(self, input, return_layer_activations=False):
        layer_activations = []
        with tf.variable_scope(self.name, reuse=self._reuse):
            D = ops.conv_block(input, 64, 'C64', 4, 2, self._is_train,
                               self._reuse, norm=None, activation=self._activation)
            layer_activations.append(D)
            D = ops.conv_block(D, 128, 'C128', 4, 2, self._is_train,
                               self._reuse, self._norm, self._activation)
            layer_activations.append(D)
            D = ops.conv_block(D, 256, 'C256', 4, 2, self._is_train,
                               self._reuse, self._norm, self._activation)
            layer_activations.append(D)
            D = ops.conv_block(D, 512, 'C512', 4, 2, self._is_train,
                               self._reuse, self._norm, self._activation)
            layer_activations.append(D)
# Example #7
import os.path
import tensorflow as tf
import numpy as np
from glob import glob as get_all_paths
from src.utils.utils import get_logger

from src.video_preprocessor import preprocess_videos
from src.utils.utils import contains_videos

# Sub-directories expected inside each task's dataset folder.
dataset_names = ['trainA', 'trainB']
# Recognized still-image extensions (jpg preferred, png also supported)
# and the supported video container extension.
preferred_image_format_file_ending = 'jpg'
supported_image_format_file_ending = 'png'
video_format_file_ending = 'mp4'
# Width of the frame-index segment in extracted frame filenames
# (1 separator + 6 digits + 1 separator = 8) — NOTE(review): confirm the
# exact filename layout against the frame-extraction code.
video_index_padding = 1 + 6 + 1

logger = get_logger("data_loader")


def get_training_datasets(task_name,
                          image_size,
                          batch_size,
                          dataset_dir="datasets",
                          frame_sequence_length=3,
                          force_video=False) -> "list[tf.data.Dataset]":
    """Build the training datasets (one per domain) for `task_name`.

    Args:
        task_name: dataset sub-folder under `dataset_dir`.
        image_size: target spatial size for loaded frames/images.
        batch_size: batch size of the returned datasets.
        dataset_dir: root directory containing all datasets.
        frame_sequence_length: frames per sample when loading video data.
        force_video: treat the data as video even when images are present.

    Returns:
        A list of tf.data.Dataset objects, one per domain.
    """
    # FIX: the annotation was the list literal `[tf.data.Dataset]`, which is
    # not a valid type hint; a string forward-ref keeps runtime behavior.
    # Input-pipeline ops are pinned to the CPU.
    with tf.device('/cpu:0'):
        verify_directory_structure(task_name, dataset_dir)
        image_path_tensors = get_image_paths(task_name, dataset_dir,
                                             frame_sequence_length,
                                             force_video)
        return build_datasets(image_path_tensors, image_size, batch_size)
# Example #8
from typing import Any, List, Optional, Dict

import torch
from hydra.utils import instantiate
from omegaconf import DictConfig
from pytorch_lightning import LightningModule
from pytorch_lightning.utilities.types import EPOCH_OUTPUT
from torch import nn
from transformers import AutoModel, AutoConfig, get_scheduler

from src.utils.utils import get_logger

log = get_logger(__name__)


class ContrastiveModule(LightningModule):
    def __init__(
        self,
        arch: str,
        temperature: float,
        criterion: str,
        optcfg: DictConfig,
        num_negatives: int,
        num_positives: Optional[int] = None,
        schcfg: Optional[DictConfig] = None,
        **kwargs,
    ):
        super().__init__()

        # this line ensures params passed to LightningModule will be saved to ckpt
        # it also allows to access params with 'self.hparams' attribute
# Example #9
import cv2
import os

from src.utils.utils import get_all_video_paths
from src.utils.utils import get_logger

logger = get_logger("video_preprocessor")

frame_directory_name = 'frames'


def preprocess_videos(path):
    """Extract frames for every video under `path` into a `frames` subdir.

    If the frame directory already exists, it is assumed complete and the
    whole step is skipped.
    """
    frame_dir = os.path.join(path, frame_directory_name)

    # Guard clause instead of if/else: nothing to do when frames exist.
    if os.path.exists(frame_dir):
        logger.info("Frame directory already exists, no preprocessing needed!")
        return
    os.makedirs(frame_dir)

    # Only enumerate the videos once we know work is needed.
    video_paths = get_all_video_paths(path)
    logger.info(f"Preprocessing {len(video_paths)} videos.")
    # FIX: loop variable renamed — it previously shadowed the `path` parameter.
    for video_path in video_paths:
        logger.info(f"Preprocessing {video_path}...")
        extract_video_frames(video_path, frame_dir)


def extract_video_frames(video_path, frame_directory):
    videoCapture = cv2.VideoCapture(video_path)
    if not videoCapture.isOpened():
        logger.info('Error opening video {}'.format(video_path))