# 예제 #1 (Example #1)
# 0

# Train a Bernoulli (binary-feature) mixture detector.
# Command-line options are parsed by the project's argparse_settings helper,
# which returns a nested settings dict.
from settings import argparse_settings
sett = argparse_settings("Train detector")
dsettings = sett['detector']  # detector-specific sub-settings


# Superseded by argparse_settings above; kept for reference.
#import argparse

#parser = argparse.ArgumentParser(description='Train mixture model on edge data')
#parser.add_argument('patches', metavar='<patches file>', type=argparse.FileType('rb'), help='Filename of patches file')
#parser.add_argument('model', metavar='<output model file>', type=argparse.FileType('wb'), help='Filename of the output models file')
#parser.add_argument('mixtures', metavar='<number mixtures>', type=int, help='Number of mixture components')
#parser.add_argument('--use-voc', action='store_true', help="Use VOC data to train model")

import gv
import glob
import os
import os.path
import amitgroup as ag

ag.set_verbose(True)

#descriptor = gv.load_descriptor(gv.BinaryDetector.DESCRIPTOR, sett)
descriptor = gv.load_descriptor(sett)
detector = gv.BernoulliDetector(dsettings['num_mixtures'], descriptor, dsettings)

# Select training images: VOC 'bicycle' crops, or (presumably) a plain
# image directory in the else-branch.
if dsettings['use_voc']:
    files = gv.voc.load_object_images_of_size(sett['voc'], 'bicycle', dsettings['image_size'], dataset='train')
else:
    base_path = ''  # NOTE(review): fragment ends here — the non-VOC branch looks truncated by the paste.
# Check prevalence of edge features over sample images.
# NOTE(review): a __future__ import must be the first statement of a module;
# here it follows other code, so this pasted fragment cannot run in place.
from __future__ import division
from settings import argparse_settings
sett = argparse_settings("Check prevalence")
esettings = sett['edges']  # edge-descriptor settings
dsettings = sett['parts']  # parts settings (image paths used below)

import numpy as np
import matplotlib.pylab as plt
import amitgroup as ag
import glob
import os
import sys
import gv


# Reconstruct the edge descriptor from its saved settings dict.
descriptor = gv.BinaryDescriptor.getclass('edges').load_from_dict(esettings)

# Resolve the image glob; base_path (if set) names an environment variable
# whose value is prepended to the relative image_dir.
base_path = dsettings.get('base_path')
path = dsettings['image_dir']
if base_path is not None:
    path = os.path.join(os.environ[base_path], path)

# Only the first matching file is processed in this fragment.
files = glob.glob(path)[:1]

counts = np.zeros(8)  # per-orientation edge counts (8 edge directions — TODO confirm)
tots = 0              # running total of locations examined

def process(f):
    """Extract edge features from one image file.

    NOTE(review): everything after the ``edges = ...`` line references names
    that are never defined in this fragment (``i``, ``fileobj``, ``detector``,
    ``mixcomp``, ``threshold``, ``topsy``) — two different scripts appear to
    have been concatenated inside this body, so calling it raises NameError
    as written. The first three lines belong to the "check prevalence"
    script; the rest to a detection/"farming" script.
    """
    #print "Processing file {0}".format(f)
    im = gv.img.load_image(f)
    edges = descriptor.extract_features(im, {'radius': 0})  # result unused below — TODO confirm intent
    ag.info('{0} Farming {1}'.format(i, fileobj.img_id))
    img = gv.img.load_image(fileobj.path)
    grayscale_img = gv.img.asgray(img)

    # Coarse detection restricted to one mixture component; keep only
    # detections above the (undefined here) confidence threshold.
    bbobjs = detector.detect_coarse(grayscale_img, fileobj=fileobj, mixcomps=[mixcomp], use_padding=False, use_scale_prior=False, cascade=True, discard_weak=True, more_detections=True, farming=True, save_samples=True)
    for bbobj in bbobjs:
        bbobj.img_id = fileobj.img_id
        if bbobj.confidence > threshold: 
            topsy.append(bbobj)

    return topsy


# Train a real-valued detector; the body runs only in the process that
# gv.parallel designates as "main" (worker processes skip it).
if gv.parallel.main(__name__):
    from settings import argparse_settings
    settings = argparse_settings("Train real-valued detector")
    dsettings = settings['detector']  # detector-specific sub-settings

    #descriptor = gv.load_descriptor(gv.RealDetector.DESCRIPTOR, sett)
    descriptor = gv.load_descriptor(settings)
    detector = gv.RealDetector(descriptor, dsettings)

    # NOTE(review): get_training_files is not defined in this fragment —
    # presumably imported elsewhere in the original script; verify.
    files = get_training_files(detector)

    # Negative examples: expand env vars in the glob, sort for determinism,
    # and optionally cap the count via neg_limit (None means no cap).
    neg_files = sorted(glob.glob(os.path.expandvars(dsettings['neg_dir'])))[:dsettings.get('neg_limit')]
    #pos_images = []
    image_size = detector.settings['image_size']

    print('Edge type:', descriptor.settings.get('edge_type', '(none)'))

    # Extract clusters (manual or through EM)
# 예제 #4 (Example #4)
# 0
# Train a Bernoulli (binary-feature) mixture detector.
# NOTE(review): this fragment duplicates the "Train detector" script earlier
# in this file, differing only in line wrapping.
from settings import argparse_settings

sett = argparse_settings("Train detector")
dsettings = sett['detector']  # detector-specific sub-settings

# Superseded by argparse_settings above; kept for reference.
#import argparse

#parser = argparse.ArgumentParser(description='Train mixture model on edge data')
#parser.add_argument('patches', metavar='<patches file>', type=argparse.FileType('rb'), help='Filename of patches file')
#parser.add_argument('model', metavar='<output model file>', type=argparse.FileType('wb'), help='Filename of the output models file')
#parser.add_argument('mixtures', metavar='<number mixtures>', type=int, help='Number of mixture components')
#parser.add_argument('--use-voc', action='store_true', help="Use VOC data to train model")

import gv
import glob
import os
import os.path
import amitgroup as ag

ag.set_verbose(True)

#descriptor = gv.load_descriptor(gv.BinaryDetector.DESCRIPTOR, sett)
descriptor = gv.load_descriptor(sett)
detector = gv.BernoulliDetector(dsettings['num_mixtures'], descriptor,
                                dsettings)

# Load VOC 'bicycle' training crops when configured to do so.
# NOTE(review): no else-branch here — fragment appears truncated.
if dsettings['use_voc']:
    files = gv.voc.load_object_images_of_size(sett['voc'],
                                              'bicycle',
                                              dsettings['image_size'],
                                              dataset='train')
# 예제 #5 (Example #5)
# 0

# Train a parts codebook (binary descriptor) from a shuffled set of images.
from settings import argparse_settings 
sett = argparse_settings("Train parts") 

# The detector settings name which descriptor to train; fetch that
# descriptor's own settings block.
psettings = sett[sett['detector']['descriptor']]

# NOTE(review): stdlib 'parser' module — deprecated and removed in
# Python 3.10, and apparently unused in this fragment; confirm before keeping.
import parser
import gv
import amitgroup as ag
import os.path
import random
import glob

ag.set_verbose(True)
# Run only in the gv.parallel "main" process.
if gv.parallel.main(__name__):

    path = os.path.expandvars(psettings['image_dir'])

    # Fixed seed so the training subset is reproducible across runs.
    files = glob.glob(path)
    random.seed(0)
    random.shuffle(files)

    settings = dict(samples_per_image=psettings['samples_per_image'])  # NOTE(review): built but unused below — verify
    cls = gv.BinaryDescriptor.getclass(sett['detector']['descriptor'])
    codebook = cls(psettings['part_size'], psettings['num_parts'], settings=psettings) 
    codebook.train_from_images(files[:psettings['num_images']])
    print('num parts', codebook.num_parts)
    codebook.save(psettings['file'])


# 예제 #6 (Example #6)
# 0
# Check prevalence of edge features over sample images.
# NOTE(review): duplicates the earlier "Check prevalence" fragment in this
# file; a __future__ import is only legal as a module's first statement,
# so this pasted copy cannot run in place either.
from __future__ import division
from settings import argparse_settings
sett = argparse_settings("Check prevalence")
esettings = sett['edges']  # edge-descriptor settings
dsettings = sett['parts']  # parts settings (image paths used below)

import numpy as np
import matplotlib.pylab as plt
import amitgroup as ag
import glob
import os
import sys
import gv

# Reconstruct the edge descriptor from its saved settings dict.
descriptor = gv.BinaryDescriptor.getclass('edges').load_from_dict(esettings)

# Resolve the image glob; base_path (if set) names an environment variable
# whose value is prepended to the relative image_dir.
base_path = dsettings.get('base_path')
path = dsettings['image_dir']
if base_path is not None:
    path = os.path.join(os.environ[base_path], path)

# Only the first matching file is processed in this fragment.
files = glob.glob(path)[:1]

counts = np.zeros(8)  # per-orientation edge counts (8 edge directions — TODO confirm)
tots = 0              # running total of locations examined


def process(f):
    #print "Processing file {0}".format(f)
    im = gv.img.load_image(f)
    edges = descriptor.extract_features(im, {'radius': 0})