Example #1
    def __init__(self, globalConfig={}, config={}):
        self.globalConfig = Configuration(globalConfig, GLOBAL_DEFAULTS)
        self.config = Configuration(config, DEFAULTS)
        self.modelConfig = Configuration.load(self.config["model_path"],
                                              "algorithm")
        self._configure_dataset()
        self._configure_algorithm()
        self._configure_executor()
Example #2
    def __init__(self, regions, model_path, data_config={}):
        self.model_path = model_path
        self._load_vocab()
        self._load_meta()
        self._scaling = 1.0
        self._max_height = 10000
        self._max_width = 10000
        self.set_regions(regions)
        self.data_config = Configuration(data_config, DEFAULT_DATACONFIG)
        self.augmenter = ImageAugmenter(self.data_config)
Example #3
    def __init__(self, **kwargs):
        self.paper_note_path = kwargs.get('paper_note_path',
                                          '../paper-notes/data/words')
        self.meta = Configuration(kwargs.get('meta', {}))
        self.data_config = Configuration(kwargs.get('data_config', {}))
        self.vocab = kwargs.get('vocab', {})
        self.pure = kwargs.get('pure', True)

        self.max_length = kwargs.get('max_length')
        self._load_data()
        self._compile_sets()
        self.augmenter = ImageAugmenter(self.data_config)
Example #4
class RegionVisualizer(object):
    def __init__(self, config={}):
        self.config = Configuration(config, DEFAULT_CONFIG)

    def __call__(self, image, regions, is_gt=False):
        for region in regions:
            self._viz_region(image, region, is_gt)
        return image

    def _draw_lines(self, image, region, color):
        if (len(region.path) > 0):
            if not self.config["filled"]:
                cv2.polylines(image, [np.array(region.path)], 1, color)
            else:
                cv2.fillPoly(image, [np.array(region.path)], color)
        else:
            cv2.rectangle(image, region.pos, region.get_bottom_right(), color,
                          1 if not self.config["filled"] else -1)

    def _color(self, region, is_gt=False):
        if is_gt:
            return (0, 255, 0)
        return (255, 0,
                0) if region.cls is not None and region.cls == 0 else (0, 0,
                                                                       255)

    def _draw_text(self, image, region, color):
        if region.text is not None and (region.cls is None or region.cls
                                        == 1) and self.config.default(
                                            "text", True):
            x, y = region.pos
            scale = 2 if self.config["large"] else 1
            thickness = 2 if self.config["large"] else 1
            reloc = 5 * scale
            # place text below if there is not enough space above
            y = y + reloc + region.size[1] if y - (20 +
                                                   reloc) < 0 else y - reloc
            cv2.putText(image, region.text, (x, y), cv2.FONT_HERSHEY_PLAIN,
                        scale, color, thickness)

    def _viz_region(self, image, region, is_gt=False):
        color = self._color(region, is_gt)
        self._draw_lines(image, region, color)
        self._draw_text(image, region, color)

    def store(self, vizimage, original_file):
        if self.config.default("store", False):
            os.makedirs(self.config["store"], exist_ok=True)
            filename = os.path.basename(original_file)
            cv2.imwrite(os.path.join(self.config["store"], filename), vizimage)
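RegionVisualizer above only consults a few keys: "filled" and "large" control the drawing, "text" (read via config.default) toggles the transcription overlay, and "store" names an optional output directory for store(). A minimal sketch of a config exercising those keys against the class above; the values and the output path are illustrative only:

# Illustrative config; the keys mirror the lookups in RegionVisualizer above.
viz_config = {
    "filled": False,          # draw outlines (cv2.polylines) instead of filled polygons
    "large": True,            # doubles font scale and thickness in _draw_text
    "text": True,             # render region.text for regions that have one
    "store": "./output/viz",  # store() writes the visualization here when set
}
viz = RegionVisualizer(viz_config)
# image = viz(image, regions)        # draw predicted regions onto a copy of the page
# viz.store(image, original_file)    # persist under the original file's basename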
Example #5
    def __init__(self, img, config={}):
        self.img = img
        self.step = 1
        self.start = None
        self.width = img.shape[1]
        self.height = img.shape[0]
        self.config = Configuration(config, DEFAULTS)
Example #6
    def __init__(self, name, transpose=True, data_config={}):
        self.name = name
        self.data_config = Configuration(data_config)
        self.min_width_factor = 15
        self.max_min_width = 400
        self.datapath = os.path.join(util.OUTPUT_PATH, name)
        self._load_vocab()
        self._load_meta()
        self._load_sets()
        self._calc_max_length()
        self._compile_sets()
        self.transpose = transpose
        self.channels = 1
        self._fill_meta()
        self.augmenter = ImageAugmenter(self.data_config)
        self.unfiltered = {}
Example #7
class Layer(object):

    __metaclass__ = abc.ABCMeta

    def __init__(self, config, defaults, data_format='nhwc'):
        self._config = Configuration(config)
        self._defaults = Configuration(defaults)
        self._format = data_format

    def __getitem__(self, key):
        default = self._defaults.default(key, None)
        return self._config.default(key, default)

    def _parse_format(self):
        return 'channels_first' if self._format == 'nchw' else 'channels_last'

    @abc.abstractmethod
    def __call__(self, x, is_train):
        pass
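The Layer base class above resolves each hyperparameter in two steps: the layer's own config is consulted first, and the shared defaults only fill in what is missing. A minimal sketch of that lookup, assuming lib.Configuration is importable and that default(key, fallback) behaves as in the snippets above; the kernel/stride keys and values are purely illustrative:

from lib.Configuration import Configuration

# Two-level lookup as in Layer.__getitem__ above (illustrative keys).
layer_config = Configuration({"kernel": [5, 5]})
layer_defaults = Configuration({"kernel": [3, 3], "stride": 1})

def lookup(key):
    # the per-layer value wins; anything missing falls back to the defaults
    return layer_config.default(key, layer_defaults.default(key, None))

print(lookup("kernel"))  # [5, 5], from the layer config
print(lookup("stride"))  # 1, from the defaults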
Example #8
class SeparatedVisualizer(object):
    def __init__(self, config={}):
        self.config = Configuration(config, DEFAULT_CONFIG)

    def __call__(self, original, merged, is_gt=False):
        if len(original.shape) > 2 and original.shape[2] == 3:
            original = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
        return np.concatenate((original, merged), axis=1)

    def store(self, vizimage, original_file):
        if self.config.default("store", False):
            os.makedirs(self.config["store"], exist_ok=True)
            filename = os.path.basename(original_file)
            cv2.imwrite(os.path.join(self.config["store"], filename), vizimage)

Example #9
class AlgorithmBase(object):

    __metaclass__ = abc.ABCMeta

    _cpu = False

    def set_cpu(self, is_cpu):
        self._cpu = is_cpu

    def __init__(self, config, defaults):
        self._config = Configuration(config)
        self._defaults = Configuration(defaults)

    def __getitem__(self, key):
        default = self._defaults.default(key, None)
        return self._config.default(key, default)

    @abc.abstractmethod
    def build_graph(self):
        pass
Example #10
class ImageVisualizer(object):
    def __init__(self, config={}):
        self.config = Configuration(config, DEFAULT_CONFIG)

    def __call__(self, original, merged, is_gt=False):
        return merged

    def store(self, vizimage, original_file):
        if self.config.default("store", False):
            os.makedirs(self.config["store"], exist_ok=True)
            filename = os.path.basename(original_file)
            cv2.imwrite(os.path.join(self.config["store"], filename), vizimage)
Example #11
    def __init__(self, **kwargs):
        self.paper_note_path = kwargs.get(
            'paper_note_path', '../paper-notes/data/final')
        self.slice_width = kwargs.get('slice_width', 320)
        self.slice_height = kwargs.get('slice_height', 320)
        self.filter = kwargs.get('filter', True)
        self.binarize = kwargs.get('binarize', False)
        self.single_page = kwargs.get('single_page', False)
        self.slicer = Slicer(**kwargs)
        self.meta = Configuration({})
        self.shuffle = kwargs.get('shuffle', True)
        self.vocab = {}
        self._load_filelists()
        self.augmenter = ImageAugmenter(kwargs.get('config', {
            "otf_augmentations": {}
        }))
        self.otf_mentioned = False
Example #12
    def configure(self, config={}):
        config = Configuration(config)

        def conf(key):
            return config.default(key, self.DEFAULTS.default(key, 1))

        self.translate(prob=conf('translate.prob'),
                       center=conf('translate.center'),
                       stdv=conf('translate.stdv'))
        self.rotate(prob=conf('rotate.prob'),
                    center=conf('rotate.center'),
                    stdv=conf('rotate.stdv'))
        self.shear(prob=conf('shear.prob'),
                   center=conf('shear.center'),
                   stdv=conf('shear.stdv'))
        self.scale(prob=conf('scale.prob'),
                   center=conf('scale.center'),
                   stdv=conf('scale.stdv'))
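configure() above only reads prob, center and stdv under the translate, rotate, shear and scale keys, and conf() falls back to DEFAULTS (or 1) for anything missing, so a matching config can stay sparse. A hedged sketch of such a config dict; the numbers are invented and dotted lookups such as 'translate.prob' are assumed to resolve against this nesting:

# Illustrative values; the keys mirror what configure() looks up above.
augmentation_config = {
    "translate": {"prob": 0.5, "center": 0.0, "stdv": 0.02},
    "rotate": {"prob": 0.5, "center": 0.0, "stdv": 0.1},
    # "shear" and "scale" are omitted on purpose: conf() then falls back to
    # self.DEFAULTS.default(key, 1) for their prob/center/stdv.
}
# augmenter.configure(augmentation_config)  # augmenter: the instance owning configure()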
Example #13
    def __init__(self, config={}):
        self.config = Configuration(config, DEFAULT_CONFIG)
        self._build_dictionary()
Example #14
    def __init__(self, config={}):
        self.config = Configuration(config, DEFAULTS)
        self.region_extractor = WordDetector(self.config["extractor"])
Example #15
import os
import sys
runpath=os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runpath, '..'))

import binascii
import re
from netfilterqueue import NetfilterQueue
from DatabaseLayer import selectAllFrom
from lib.Configuration import Configuration as conf

HoneyTokens=[]
db=conf.getDB()

def readData():
  try:
    global HoneyTokens
    HoneyTokens=selectAllFrom(db, "HoneyTokens")
    print("imported %s honeytokens"%len(HoneyTokens))
  except Exception as e:
    print("An error occured: %s"%e)

def checkTraffic(pkt):
  try:
    for x in HoneyTokens:
      check = re.compile(x["token"], re.IGNORECASE) if x['caseinsensitive'] else re.compile(x["token"])
      if(check.search(pkt.get_payload())):
        if x["action"].lower() == "drop":
          print("Packet dropped!")
          pkt.drop()
          return
Example #16
class PaperNoteWords(Dataset):
    def __init__(self, **kwargs):
        self.paper_note_path = kwargs.get('paper_note_path',
                                          '../paper-notes/data/words')
        self.meta = Configuration(kwargs.get('meta', {}))
        self.data_config = Configuration(kwargs.get('data_config', {}))
        self.vocab = kwargs.get('vocab', {})
        self.pure = kwargs.get('pure', True)

        self.max_length = kwargs.get('max_length')
        self._load_data()
        self._compile_sets()
        self.augmenter = ImageAugmenter(self.data_config)

    def info(self):
        pass

    def _compile_set(self, dataset):
        for item in self.data[dataset]:
            item['compiled'] = self.compile(item['truth'])

    def _compile_sets(self):
        self._compile_set("train")
        self._compile_set("dev")
        self._compile_set("test")

    def _load_data(self):
        prefix = "pure_" if self.pure else ""
        self.data = {
            "dev": self._load_wordlist("{}dev".format(prefix)),
            "train": self._load_wordlist("{}train".format(prefix)),
            "test": self._load_wordlist("{}test".format(prefix)),
            "print_dev": self._load_classlist("dev"),
            "print_test": self._load_classlist("test"),
            "print_train": self._load_classlist("train"),
        }

    def _load_wordlist(self, subset):
        basepath = os.path.join(self.paper_note_path, subset)
        words = util.loadJson(basepath, "words")
        parsed = []
        for word in words:
            parsed.append(
                self._fileobj(basepath, "{}.png".format(word), words[word]))
        return parsed

    def _load_classlist(self, subset):
        files = self._load_filelist(subset, 1)
        files.extend(
            self._load_filelist("print_{}".format(subset), 0, len(files)))
        return files

    def _load_filelist(self, subset, is_htr, length=None) -> list:
        basepath = os.path.join(self.paper_note_path, subset)
        if os.path.exists(basepath):
            all_files = os.listdir(basepath)
            shuffle(all_files)
            length = len(all_files) if length is None else min(
                length, len(all_files))
            files = list(
                filter(lambda x: x.endswith(".png"), all_files[:length]))
            return list(
                map(lambda x: self._fileobj(basepath, x, is_htr), files))
        return []

    def _fileobj(self, basepath: str, filename: str, truth):
        return {
            "path": os.path.join(basepath, filename),
            "truth": truth,
        }

    def compile(self, text):
        parsed = [self.vocab[1][c] for c in text]
        parsed.extend([-1] * (self.max_length - len(text)))
        return parsed

    def decompile(self, values):
        def getKey(key):
            try:
                return self.vocab[0][str(key)]
            except KeyError:
                return ''

        return ''.join([getKey(c) for c in values])

    def getBatchCount(self, batch_size, max_batches=0, dataset="train"):
        total_len = len(self.data[dataset])
        num_batches = int(math.ceil(float(total_len) / batch_size))
        return min(num_batches,
                   max_batches) if max_batches > 0 else num_batches

    def generateBatch(self,
                      batch_size,
                      max_batches=0,
                      dataset="train",
                      with_filepath=False,
                      augmentable=False):
        num_batches = self.getBatchCount(batch_size, max_batches, dataset)
        if self.data_config.default('shuffle_epoch', False):
            shuffle(self.data[dataset])
        for b in range(num_batches):
            yield self._load_batch(b,
                                   batch_size,
                                   dataset,
                                   with_filepath,
                                   augmentable=augmentable)
        pass

    def load_image(self, path, transpose=False, augmentable=False):
        target_size = (
            int(self.meta["height"] -
                (self.data_config.default('preprocess.padding', 0) * 2)),
            int(self.meta["width"] -
                (self.data_config.default('preprocess.padding', 0) * 2)))
        x = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        if x is None or x.shape[0] == 0 or x.shape[1] == 0:
            return None
        x = self.augmenter.preprocess(x, target_size)
        if x is None:
            return None
        if self.data_config.default("otf_augmentations",
                                    False) and augmentable:
            x = self.augmenter.augment(x)
        else:
            x = self.augmenter.add_graychannel(x)

        if x.shape[1] != self.meta["width"] or x.shape[0] != self.meta[
                "height"]:
            x = self.augmenter.pad_to_size(x,
                                           width=self.meta["width"],
                                           height=self.meta["height"])

        return self.augmenter.add_graychannel(x)

    def _loadline(self, line, transpose=True, augmentable=False):
        l = len(line["truth"])
        y = np.asarray(line["compiled"])
        x = self.load_image(line["path"], augmentable=augmentable)
        return x, y, l, line["path"]

    def _loadprintline(self, line, transpose=True, augmentable=False):
        y = line["truth"]
        x = self.load_image(line["path"], augmentable=augmentable)
        return x, [y], 0, line["path"]

    def _load_batch(self,
                    index,
                    batch_size,
                    dataset,
                    with_filepath=False,
                    augmentable=False):
        X = []
        Y = []
        L = []
        F = []

        parseline = self._loadline if not dataset.startswith(
            "print_") else self._loadprintline

        for idx in range(
                index * batch_size,
                min((index + 1) * batch_size, len(self.data[dataset]))):
            x, y, l, f = parseline(self.data[dataset][idx],
                                   augmentable=augmentable)
            if x is not None:
                X.append(x)
                Y.append(y)
                L.append(l)
                F.append(f)
        X = np.asarray(X)
        Y = np.asarray(Y)
        L = np.asarray(L)
        if not with_filepath:
            return X, Y, L
        else:
            return X, Y, L, F

    # deprecated

    def generateEpochs(self,
                       batch_size,
                       num_epochs,
                       max_batches=0,
                       dataset="train",
                       with_filepath=False,
                       augmentable=False):
        for e in range(num_epochs):
            yield self.generateBatch(batch_size,
                                     max_batches=max_batches,
                                     dataset=dataset,
                                     with_filepath=with_filepath,
                                     augmentable=augmentable)
Example #17
    parser.add_argument(
        '-a',
        metavar='action',
        help='Action to take when triggered (accept/block/drop)')
    parser.add_argument('-d',
                        metavar='database',
                        help='Database to be modified')
    parser.add_argument('-I', action='store_true', help='Case Insensitive')
    parser.add_argument('-B',
                        action='store_true',
                        help='Binary Blob (enter in hex)')
    parser.add_argument('-N',
                        action='store_true',
                        help='Notify - Alert the user right away')
    args = parser.parse_args()

    db = args.d if args.d else conf.getDB()

    if args.L:
        for x in conf.getTables():
            print("=" * 80 + "\n%s\n" % (x) + "=" * 80)
            for y in selectAllFrom(db, x):
                sys.stdout.write("|  ")
                for z in sorted(y.keys()):
                    sys.stdout.write("%s: %s  |  " % (z, y[z]))
                print("")
    elif args.A:
        if args.t:
            # if args.B (Binary), get the clean hex version
            token = args.t if not args.B else is_hex(args.t)
            action = args.a.lower() if args.a else conf.getDefaultAction()
            alert = True if args.N else False
Example #18
    parser.add_argument('--logplacement',
                        help='Log Device placement',
                        action='store_true',
                        default=False)
    parser.add_argument('--model-date',
                        help='date to continue for',
                        default='')
    parser.add_argument('--model-epoch',
                        help='epoch to continue for',
                        default=0,
                        type=int)
    args = parser.parse_args()

    # TRAINING
    logger = Logger()
    config = Configuration.load(SEP_CONFIG_PATH, args.config)
    config()
    algorithm = TFUnet(config['algo_config'])
    algorithm.configure(learning_rate=config['learning_rate'],
                        slice_width=config['data_config.slice_width'],
                        slice_height=config['data_config.slice_height'])
    executor = Executor(algorithm, True, config, logger=logger)
    dataset = PaperNoteSlices(paper_note_path=config.default(
        'data_config.paper_note_path', '../paper-notes/data/final'),
                              filter=config['data_config.filter'],
                              slice_width=config['data_config.slice_width'],
                              slice_height=config['data_config.slice_height'],
                              binarize=config.default('binary', False),
                              config=config['data_config'])

    log_name = '{}-{}'.format(config["name"],
Example #19
    def __init__(self, config={}):
        self.config = Configuration(config, DEFAULT_CONFIG)
Example #20
            if self.gtprov is not None and self.config.default(
                    'gt.viz', False):
                vizimage = self.viz(vizimage, gt, True)
            if len(self.blocks) > 0:
                vizimage = self.viz(vizimage, res["result"], False)
            self.viz.store(vizimage, file)
            res["viz"] = vizimage
        if len(self.evals) > 0:
            for evl in self.evals:
                scores = evl(gt, res["result"])
                for score_key in scores.keys():
                    self.scores[score_key] = [
                        scores[score_key]
                    ] if score_key not in self.scores else [
                        scores[score_key], *self.scores[score_key]
                    ]
        return res


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config')
    parser.add_argument('--gpu',
                        help='Runs scripts on gpu. Default is cpu.',
                        default=-1,
                        type=int)
    args = parser.parse_args()
    config = Configuration.load("./config/e2e/", args.config)
    e2e = E2ERunner(config, {"gpu": args.gpu})
    e2e()
Example #21
class RegionDataset(Dataset):
    def __init__(self, regions, model_path, data_config={}):
        self.model_path = model_path
        self._load_vocab()
        self._load_meta()
        self._scaling = 1.0
        self._max_height = 10000
        self._max_width = 10000
        self.set_regions(regions)
        self.data_config = Configuration(data_config, DEFAULT_DATACONFIG)
        self.augmenter = ImageAugmenter(self.data_config)

    def info(self):
        self.meta('Dataset Configuration')

    def scaling(self, scaling, max_height, max_width):
        self.augmenter.config['preprocess.scale'] = scaling
        self._max_height = max_height
        self._max_width = max_width

    def _load_meta(self):
        self.meta = Configuration(util.loadJson(self.model_path, "data_meta"))

    def _load_vocab(self):
        self.vocab = util.loadJson(self.model_path, "vocab")
        self.vocab_length = len(self.vocab[0])

    def _load_sets(self):
        self.data = np.asarray(
            list(
                filter(lambda x: x is not None,
                       [self._loadimage(region) for region in self.regions])))

    def _loadimage(self, region):
        if region.img.shape[0] == 0 or region.img.shape[1] == 0:
            img = np.zeros((self.meta["height"], self.meta["width"]))
        elif len(region.img.shape) > 2:
            img = cv2.cvtColor(region.img, cv2.COLOR_BGR2GRAY)
        else:
            img = region.img
        target_size = (
            int(self.meta["height"] -
                (self.data_config.default('preprocess.padding', 0) * 2)),
            int(self.meta["width"] -
                (self.data_config.default('preprocess.padding', 0) * 2)))
        img = self.augmenter.preprocess(img, target_size)
        if img is not None:
            img = self.augmenter.postprocesss(img)
        if img is None:
            img = np.zeros((self.meta["height"], self.meta["width"]))
        return self.augmenter.add_graychannel(img)

    def set_regions(self, regions):
        self.regions = regions
        if regions is not None:
            self._load_sets()

    def compile(self, text):
        parsed = [self.vocab[1][c] for c in text]
        parsed.extend([-1] * (self.max_length - len(text)))
        return parsed

    def decompile(self, values):
        def getKey(key):
            try:
                return self.vocab[0][str(key)]
            except KeyError:
                return ''

        return ''.join([getKey(c) for c in values])

    def _load_batch(self, index, batch_size, dataset, with_filepath=False):
        batch_data = np.asarray(
            self.data[index * batch_size:min((index + 1) *
                                             batch_size, len(self.data))])
        if with_filepath:
            return batch_data, [], [], []
        else:
            return batch_data, [], []

    def generateBatch(self,
                      batch_size=0,
                      max_batches=0,
                      dataset="",
                      with_filepath=False):
        num_batches = self.getBatchCount(batch_size, max_batches, "")
        for b in range(num_batches):
            yield self._load_batch(b, batch_size, "", with_filepath)
        pass

    def generateEpochs(self,
                       batch_size,
                       num_epochs,
                       max_batches=0,
                       dataset="train",
                       with_filepath=False):
        return [self.generateBatch()]

    def getBatchCount(self, batch_size, max_batches=0, dataset=""):
        return int(np.ceil(len(self.data) / float(batch_size)))
Example #22
if __name__=='__main__':
  description='''Management script'''

  parser = argparse.ArgumentParser(description=description)
  parser.add_argument('-L', action='store_true', help='List')
  parser.add_argument('-A', action='store_true', help='Add')
  parser.add_argument('-t', metavar='token',     help='Token to add or remove')
  parser.add_argument('-a', metavar='action',    help='Action to take when triggered (accept/block/drop)')
  parser.add_argument('-d', metavar='database',  help='Database to be modified')
  parser.add_argument('-I', action='store_true', help='Case Insensitive')
  parser.add_argument('-B', action='store_true', help='Binary Blob (enter in hex)')
  parser.add_argument('-N', action='store_true', help='Notify - Alert the user right away')
  args = parser.parse_args()
  
  db=args.d if args.d else conf.getDB()
  
  if args.L:
    for x in conf.getTables():
      print("="*80 + "\n%s\n"%(x) + "="*80)
      for y in selectAllFrom(db, x):
        sys.stdout.write("|  ")
        for z in sorted(y.keys()):
          sys.stdout.write("%s: %s  |  "%(z, y[z]))
        print("")
  elif args.A:
    if args.t:
      # if args.B (Binary), get the clean hex version
      token=args.t if not args.B else is_hex(args.t)
      action=args.a.lower() if args.a else conf.getDefaultAction()
      alert=True if args.N else False
Example #23
    parser.add_argument('--model-date',
                        default=MODEL_DATE)
    parser.add_argument('--paper-note-path',
                        default='../paper-notes/data/words')
    parser.add_argument('--model-epoch',
                        help='epoch to continue for',
                        default=MODEL_EPOCH,
                        type=int)
    args = parser.parse_args()

    # TRAINING
    LOG_NAME = '{}-{}'.format("otf-iam-paper", args.model_date)
    model_folder = os.path.join(Constants.MODELS_PATH, LOG_NAME)
    models_path = os.path.join(model_folder,
                               'model-{}'.format(args.model_epoch))
    logger = Logger()
    config = Configuration.load(model_folder, "algorithm")
    algorithm = HtrNet(config['algo_config'])
    dataset = PreparedDataset.PreparedDataset(config['dataset'], False,
                                              config['data_config'])

    algorithm.configure(batch_size=config['batch'],
                        learning_rate=config['learning_rate'],
                        sequence_length=dataset.max_length,
                        image_height=dataset.meta["height"],
                        image_width=dataset.meta["width"],
                        vocab_length=dataset.vocab_length,
                        channels=dataset.channels,
                        class_learning_rate=config.default(
                            'class_learning_rate', config['learning_rate']))
    executor = Executor(algorithm, True, config, logger=logger)
Example #24
    def augment(self, img, get_settings=False):
        augmentation_settings = {}
        if "warp" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.warp.prob']:
                if(not self.config.default('preprocess.invert', False)):
                    img = 255 - img
                reshaped = False
                if len(img.shape) > 2:
                    reshaped = True
                    img = np.reshape(img, (img.shape[0], img.shape[1]))
                img = convert._cv2pil(img)
                img, mat = warp._warp(
                    img,
                    gridsize=self.config['otf_augmentations.warp.gridsize'],
                    deviation=self.config['otf_augmentations.warp.deviation'],
                    return_mat=True)
                augmentation_settings["warp"] = {
                    "gridsize": self.config['otf_augmentations.warp.gridsize'],
                    "mat": mat
                }
                img = convert._pil2cv2(img)
                if reshaped:
                    img = np.reshape(img, (img.shape[0], img.shape[1], 1))
                if(not self.config.default('preprocess.invert', False)):
                    img = 255 - img
        if "affine" in self.config["otf_augmentations"]:
            if(self.config.default('preprocess.invert', False)):
                img = 255 - img
            img, mat = affine._affine(
                img, self.config["otf_augmentations.affine"], return_mat=True)
            augmentation_settings["affine"] = {
                "mat": mat
            }
            if(self.config.default('preprocess.invert', False)):
                img = 255 - img
        if "morph" in self.config["otf_augmentations"]:
            img, op_name, op_values = morph._random_morph(
                img, self.config["otf_augmentations.morph"], self.config.default('preprocess.invert', False), True)
            augmentation_settings["morph"] = {
                "op_name": op_name,
                "op_values": op_values
            }
        if "binarize" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.binarize.prob']:
                img = binarize._binarize(img)
                augmentation_settings["binarize"] = {}
        if "blur" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.blur.prob']:
                img = cv2.GaussianBlur(
                    img, tuple(self.config['otf_augmentations.blur.kernel']), self.config['otf_augmentations.blur.sigma'])
                augmentation_settings["blur"] = {
                    "kernel": self.config['otf_augmentations.blur.kernel'],
                    "sigma": self.config['otf_augmentations.blur.sigma']
                }
        if "sharpen" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.sharpen.prob']:
                img = self._unsharp_mask_filter(
                    img, tuple(self.config['otf_augmentations.sharpen.kernel']), self.config['otf_augmentations.sharpen.sigma'])
                augmentation_settings["sharpen"] = {
                    "kernel": self.config['otf_augmentations.sharpen.kernel'],
                    "sigma": self.config['otf_augmentations.sharpen.sigma']
                }
        if "brighten" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.brighten.prob']:
                factor = np.random.normal(
                    self.config['otf_augmentations.brighten.center'], self.config['otf_augmentations.brighten.stdv'])
                factor = factor if factor >= 1 else 1
                img = np.uint8(np.clip(img * factor, 0, 255))
                augmentation_settings["brighten"] = {
                    "factor": factor
                }
        if "darken" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.darken.prob']:
                factor = np.random.normal(
                    self.config['otf_augmentations.darken.center'], self.config['otf_augmentations.darken.stdv'])
                factor = factor if factor >= 1 else 1
                img = 255 - np.uint8(np.clip((255 - img) * factor, 0.0, 255.0))
                augmentation_settings["darken"] = {
                    "factor": factor
                }
        if not get_settings:
            return self.add_graychannel(img)
        else:
            return self.add_graychannel(img), Configuration(augmentation_settings)
Example #25
from lib.Configuration import Configuration

c = Configuration()


def add_tracking_code(request):
    if c.is_tracking_active():
        return {"tracking_code": c.get_tracking_code()}
    else:
        return {}
Example #26
class E2ERunner(object):
    def __init__(self, config={}, globalConfig={}):
        self.config = Configuration(config)
        self.globalConfig = Configuration(globalConfig)
        self._parse_config()
        self.logger = Logger()
        self.config()

    def _parse_config(self):
        self._parse_blocks(self.config["blocks"])
        self.viz = self._parse_visualizer(self.config.default("viz", None))
        self.gtprov = self._parse_gt(self.config.default("gt", None))
        self.evals = self._parse_evals(self.config.default('eval', []))

    def _parse_blocks(self, blocks):
        self.blocks = [
            self._parse_block(block) for block in blocks
            if "disabled" not in block or not block["disabled"]
        ]

    def _parse_block(self, block):
        if block["type"] == "TextSeparation":
            return TextSeparation(self.globalConfig, block)
        elif block["type"] == "WordDetection":
            return WordDetection(block)
        elif block["type"] == "LineSegmentation":
            return LineSegmentation(block)
        elif block["type"] == "ParagraphDetection":
            return ParagraphDetection(block)
        elif block["type"] == "UnigramLanguageModel":
            return UnigramLanguageModel(block)
        elif block["type"] == "Ceiling":
            return Ceiling(block)
        elif block["type"] == "TranscriptionAndClassification":
            return TranscriptionAndClassification(self.globalConfig, block)

    def _parse_evals(self, eval_configs):
        return [self._parse_eval(config) for config in eval_configs]

    def _parse_eval(self, config):
        if config is None:
            return None
        if config["type"] == "IoU":
            return IoU(config)
        elif config["type"] == "IoUPixelSum":
            return IoUPixelSum(config)
        elif config["type"] == "BagOfWords":
            return BagOfWords(config)
        elif config["type"] == "IoUCER":
            return IoUCER(config)

    def _parse_data(self, data_config):
        if isinstance(data_config, list):
            return data_config
        else:
            prefix = data_config["prefix"] if "prefix" in data_config else ""
            filenames = list(
                filter(
                    lambda f: f.endswith(data_config["suffix"]) and f.
                    startswith(prefix), os.listdir(data_config["path"])))
            if data_config["limit"] > 0:
                filenames = filenames[:data_config["limit"]]
            return [
                os.path.join(data_config["path"], filename)
                for filename in filenames
            ]

    def _parse_visualizer(self, viz_config):
        if viz_config is None:
            return None
        if viz_config["type"] == "RegionVisualizer":
            return RegionVisualizer(viz_config)
        elif viz_config["type"] == "ImageVisualizer":
            return ImageVisualizer(viz_config)
        elif viz_config["type"] == "SeparatedVisualizer":
            return SeparatedVisualizer(viz_config)

    def _parse_gt(self, gt_config):
        if gt_config is None:
            return None
        if gt_config["type"] == "WordRegion":
            return WordRegionGTProvider()
        elif gt_config["type"] == "ParagraphRegion":
            return ParagraphRegionGTProvider()
        elif gt_config["type"] == "LineRegion":
            return LineRegionGTProvider()

    def __call__(self, log_prefix="E2E", skip_range_evaluation=False):
        if not skip_range_evaluation and self.config.default("ranger", False):
            self.logger.write("Entering Range Execution Mode")
            return self._range_exec()
        start = time()
        self.scores = {}
        data = self._parse_data(self.config["data"])
        results = []
        times = []
        for idx, file in enumerate(data):
            file_time = time()
            self.logger.progress(log_prefix, idx, len(data))
            results.append(self._exec(file))
            times.append(time() - file_time)
        [block.close() for block in self.blocks]
        if len(self.evals) > 0:
            final_scores = {
                "time": time() - start,
                "median time": np.median(times),
                "avg time": np.average(times)
            }
            for score_key in self.scores:
                final_scores[score_key] = np.average(self.scores[score_key])
            self.logger.summary(log_prefix, final_scores)
        return results

    def _get_range(self):
        if type(self.config["ranger.values"]) is dict:
            return frange(self.config["ranger.values.from"],
                          self.config["ranger.values.to"],
                          self.config["ranger.values.step"])

    def _range_exec(self):
        def set_config(value):
            for path in self.config.default(
                    "ranger.paths", [self.config.default("ranger.path", [])]):
                current = self.config
                for step in path[:-1]:
                    current = current[step]
                current[path[-1]] = value
            self._parse_config()

        for val in self._get_range():
            set_config(val)
            prefix = self.config.default("ranger.template", "value {}")
            self(log_prefix=prefix.format(val), skip_range_evaluation=True)

    def _exec(self, file):
        original = cv2.imread(file)
        last_output = original.copy()

        for block in self.blocks:
            last_output = block(last_output, file)
        res = {"file": file, "original": original, "result": last_output}
        if self.gtprov is not None:
            gt = self.gtprov(file, original)
        if self.viz is not None:
            vizimage = res["original"].copy()
            if self.gtprov is not None and self.config.default(
                    'gt.viz', False):
                vizimage = self.viz(vizimage, gt, True)
            if len(self.blocks) > 0:
                vizimage = self.viz(vizimage, res["result"], False)
            self.viz.store(vizimage, file)
            res["viz"] = vizimage
        if len(self.evals) > 0:
            for evl in self.evals:
                scores = evl(gt, res["result"])
                for score_key in scores.keys():
                    self.scores[score_key] = [
                        scores[score_key]
                    ] if score_key not in self.scores else [
                        scores[score_key], *self.scores[score_key]
                    ]
        return res
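_parse_config() and __call__() above consume a small, fixed set of top-level keys: blocks (a list of {"type": ...} entries, optionally disabled), data (either a list of files or a {path, suffix, prefix, limit} spec), plus optional viz, gt, eval and ranger sections. A hedged sketch of a config with that shape; the paths and block choices are illustrative, and real blocks need additional keys (model paths and the like) that are not shown here:

# Illustrative end-to-end config covering only the keys E2ERunner reads above.
e2e_config = {
    "data": {"path": "./data/pages", "suffix": ".png", "limit": 10},
    "blocks": [
        {"type": "WordDetection"},
        {"type": "LineSegmentation", "disabled": True},  # skipped by _parse_blocks
    ],
    "viz": {"type": "RegionVisualizer", "store": "./output/viz"},
    "gt": {"type": "WordRegion", "viz": True},  # gt.viz: also draw the ground truth
    "eval": [{"type": "IoU"}],
}
# runner = E2ERunner(e2e_config, {"gpu": -1})
# results = runner()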
Example #27
# Cellz Configuration Management

from lib.Configuration import Configuration

CONFIG = Configuration()

# Django settings for homepage_cellz project.

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', '*****@*****.**'),
)

MANAGERS = ADMINS
""" THE POSTGRES STUFF
'default': {
    'ENGINE': 'django.db.backends.postgresql_psycopg2',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
    'NAME': "lootgame", #CONFIG.get_DatabasePath(),  # Or path to database file if using sqlite3.
    'USER': '******',                      # Not used with sqlite3.
    'PASSWORD': '******',                  # Not used with sqlite3.
    'HOST': 'localhost',                      # Set to empty string for localhost. Not used with sqlite3.
    'PORT': '5432',                      # Set to empty string for default. Not used with sqlite3.
}
"""

DATABASES = {
    'default': {
        'ENGINE':
        'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
Example #28
    def __init__(self, config={}, globalConfig={}):
        self.config = Configuration(config)
        self.globalConfig = Configuration(globalConfig)
        self._parse_config()
        self.logger = Logger()
        self.config()
Example #29
class ImageAugmenter(object):

    def __init__(self, config):
        self.config = Configuration(config)

    def augment(self, img, get_settings=False):
        augmentation_settings = {}
        if "warp" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.warp.prob']:
                if(not self.config.default('preprocess.invert', False)):
                    img = 255 - img
                reshaped = False
                if len(img.shape) > 2:
                    reshaped = True
                    img = np.reshape(img, (img.shape[0], img.shape[1]))
                img = convert._cv2pil(img)
                img, mat = warp._warp(
                    img,
                    gridsize=self.config['otf_augmentations.warp.gridsize'],
                    deviation=self.config['otf_augmentations.warp.deviation'],
                    return_mat=True)
                augmentation_settings["warp"] = {
                    "gridsize": self.config['otf_augmentations.warp.gridsize'],
                    "mat": mat
                }
                img = convert._pil2cv2(img)
                if reshaped:
                    img = np.reshape(img, (img.shape[0], img.shape[1], 1))
                if(not self.config.default('preprocess.invert', False)):
                    img = 255 - img
        if "affine" in self.config["otf_augmentations"]:
            if(self.config.default('preprocess.invert', False)):
                img = 255 - img
            img, mat = affine._affine(
                img, self.config["otf_augmentations.affine"], return_mat=True)
            augmentation_settings["affine"] = {
                "mat": mat
            }
            if(self.config.default('preprocess.invert', False)):
                img = 255 - img
        if "morph" in self.config["otf_augmentations"]:
            img, op_name, op_values = morph._random_morph(
                img, self.config["otf_augmentations.morph"], self.config.default('preprocess.invert', False), True)
            augmentation_settings["affine"] = {
                "op_name": op_name,
                "op_values": op_values
            }
        if "binarize" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.binarize.prob']:
                img = binarize._binarize(img)
                augmentation_settings["binarize"] = {}
        if "blur" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.blur.prob']:
                img = cv2.GaussianBlur(
                    img, tuple(self.config['otf_augmentations.blur.kernel']), self.config['otf_augmentations.blur.sigma'])
                augmentation_settings["blur"] = {
                    "kernel": self.config['otf_augmentations.blur.kernel'],
                    "sigma": self.config['otf_augmentations.blur.sigma']
                }
        if "sharpen" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.sharpen.prob']:
                img = self._unsharp_mask_filter(
                    img, tuple(self.config['otf_augmentations.sharpen.kernel']), self.config['otf_augmentations.sharpen.sigma'])
                augmentation_settings["sharpen"] = {
                    "kernel": self.config['otf_augmentations.sharpen.kernel'],
                    "sigma": self.config['otf_augmentations.sharpen.sigma']
                }
        if "brighten" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.brighten.prob']:
                factor = np.random.normal(
                    self.config['otf_augmentations.brighten.center'], self.config['otf_augmentations.brighten.stdv'])
                factor = factor if factor >= 1 else 1
                img = np.uint8(np.clip(img * factor, 0, 255))
                augmentation_settings["brighten"] = {
                    "factor": factor
                }
        if "darken" in self.config["otf_augmentations"]:
            if np.random.uniform() < self.config['otf_augmentations.darken.prob']:
                factor = np.random.normal(
                    self.config['otf_augmentations.darken.center'], self.config['otf_augmentations.darken.stdv'])
                factor = factor if factor >= 1 else 1
                img = 255 - np.uint8(np.clip((255 - img) * factor, 0.0, 255.0))
                augmentation_settings["darken"] = {
                    "factor": factor
                }
        if not get_settings:
            return self.add_graychannel(img)
        else:
            return self.add_graychannel(img), Configuration(augmentation_settings)

    def binarization(self, img):
        if(self.config.default('preprocess.invert', False)):
            img = 255 - img
        _, img = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
        if(self.config.default('preprocess.invert', False)):
            img = 255 - img
        return self.add_graychannel(img)

    def apply_augmentation(self, img, settings):
        if settings.default("warp", False):
            if(not self.config.default('preprocess.invert', False)):
                img = 255 - img
            reshaped = False
            if len(img.shape) > 2:
                reshaped = True
                img = np.reshape(img, (img.shape[0], img.shape[1]))
            img = convert._cv2pil(img)
            img = warp._warp(
                img,
                gridsize=settings['warp.gridsize'],
                mat=settings['warp.mat'])
            img = convert._pil2cv2(img)
            if reshaped:
                img = np.reshape(img, (img.shape[0], img.shape[1], 1))
            if(not self.config.default('preprocess.invert', False)):
                img = 255 - img
        if settings.default("affine", False):
            img = affine._affine(
                img, mat=settings["affine.mat"], background=255.0)
        if settings.default("morph", False):
            img = morph._morph(img, settings['morph.op_name'], settings['morph.op_values'], self.config.default(
                'preprocess.invert', False))
        if settings.default("binarize", False):
            img = binarize._binarize(img)
        if settings.default("blur", False):
            img = cv2.GaussianBlur(
                img, tuple(settings['blur.kernel']), settings['blur.sigma'])
        if settings.default("sharpen", False):
            img = self._unsharp_mask_filter(
                img, tuple(settings['sharpen.kernel']), settings['sharpen.sigma'])
        if settings.default("brighten", False):
            img = np.uint8(
                np.clip(img * settings["brighten.factor"], 0.0, 255.0))
        if settings.default("darken", False):
            img = 255 - np.uint8(
                np.clip((255 - img) * settings["darken.factor"], 0.0, 255.0))
        return self.add_graychannel(img)

    def _unsharp_mask_filter(self, image, kernel, sigma):
        gaussian_3 = cv2.GaussianBlur(image, kernel, sigma)
        return cv2.addWeighted(image, 1.5, gaussian_3, -0.5, 0, image)

    def add_graychannel(self, img):
        if len(img.shape) == 2:
            return np.reshape(img, [img.shape[0], img.shape[1], 1])
        return img

    def pad_to_size(self, img, height, width):
        return self._pad(img, (height, width, 1))

    def _scale(self, img, factor, target_size=None):
        height = int(img.shape[0] / factor)
        width = int(img.shape[1] / factor)
        if width <= 0 or height <= 0:
            return None
        return cv2.resize(img, (width, height))

    def _scale_img(self, img, scale_factor, target_size=None):
        if img.shape[0] == 0 or img.shape[1] == 0:
            return None
        factor = max(img.shape[0] / target_size[0],
                     img.shape[1] / target_size[1],
                     scale_factor)
        img = self._scale(img, factor)
        return img

    def preprocess(self, img, target_size=None):
        bg = 255
        if self.config.default('preprocess.invert', False):
            img = invert._invert(img)
            bg = 255 - bg

        if self.config.default('preprocess.crop', False):
            if img.shape[0] == 0 or img.shape[1] == 0:
                return None
            img = crop._crop(img)
            if img is None:
                return None

        if self.config.default('preprocess.scale', False):
            img = self._scale_img(
                img, self.config['preprocess.scale'], target_size)
            if img is None:
                return None

        if self.config.default('preprocess.padding', False):
            img = padding._pad_cv2(img, self.config['preprocess.padding'], bg)
        img = self.add_graychannel(img)
        if target_size != None:
            target_size = (
                target_size[0] +
                (self.config.default('preprocess.padding', 0)*2),
                target_size[1] +
                (self.config.default('preprocess.padding', 0)*2),
                1
            )
            img = self._pad(img, target_size)
        return img

    def postprocesss(self, img):
        if self.config.default('postprocess.binarize', False):
            img = self.binarization(img)
        return img

    def _pad(self, array, reference_shape, offsets=None):
        """
        array: Array to be padded
        reference_shape: tuple of size of ndarray to create
        offsets: list of offsets (number of elements must be equal to the dimension of the array)
        will throw a ValueError if offsets is too big and the reference_shape cannot handle the offsets
        """
        offsets = offsets if offsets is not None else [
            0] * len(array.shape)
        # Create an array of zeros with the reference shape
        result = np.zeros(reference_shape)
        # Create a list of slices from offset to offset + shape in each dimension
        insertHere = [slice(offsets[dim], offsets[dim] + array.shape[dim])
                      for dim in range(array.ndim)]
        # Insert the array in the result at the specified offsets
        result[tuple(insertHere)] = array
        return result
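augment() above gates each transform on membership in otf_augmentations and reads its parameters from the matching sub-keys, while preprocess() and postprocesss() look under preprocess and postprocess, so enabling a step amounts to adding its section. A hedged sketch of a data_config for the class above; the numbers are illustrative and dotted lookups such as 'otf_augmentations.blur.prob' are assumed to resolve against this nesting:

# Illustrative data_config; the keys mirror the lookups in ImageAugmenter above.
data_config = {
    "otf_augmentations": {
        "blur": {"prob": 0.3, "kernel": [3, 3], "sigma": 1.0},
        "brighten": {"prob": 0.2, "center": 1.2, "stdv": 0.1},
    },
    "preprocess": {"invert": True, "scale": 2.0, "padding": 2},
    "postprocess": {"binarize": False},
}
augmenter = ImageAugmenter(data_config)
# x = augmenter.preprocess(img, target_size=(64, 256))  # img: a grayscale numpy array
# x = augmenter.augment(x)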
Example #30
    def _load_meta(self):
        self.meta = Configuration(util.loadJson(self.model_path, "data_meta"))
Example #31
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect

from core.content import SkillCategory
from core.controller.tutorial.TutorialRedirectDecorator import TutorialRedirectDecorator
from lib.api.mission import missionAPI
from lib.api.user import userAPI

import lib.log.logger as _logger

_loggerinstance = _logger.getInstance()

from lib.Configuration import Configuration

_configuration = Configuration()


@login_required(login_url='/index/')
@TutorialRedirectDecorator
def view_planet(request):
    if missionAPI.user_participates_in_mission(request.user):
        return HttpResponseRedirect("/current_mission/")
    mission = missionAPI.get_possible_missions_for_player(request.user)
    c = RequestContext(request, {"missions": mission})
    return render_to_response('mission/planet.html', c)


@login_required(login_url='/index/')
def prepare_mission(request, mission_id):
    """ Method for starting a mission """
Example #32
    def __init__(self, config):
        self.config = Configuration(config)