Example #1
def create_nanodet(cfg_path, weights):
    # Assumed context for this snippet: `cfg` and `load_config` come from
    # nanodet.util, `load` is torch.load, and OneStageDetector is nanodet's
    # detector class.
    load_config(cfg, cfg_path)
    model_cfg = cfg.model
    model_cfg.arch.backbone.pop("name")
    model_cfg.arch.fpn.pop("name")
    model_cfg.arch.head.pop("name")
    model = OneStageDetector(model_cfg.arch.backbone, model_cfg.arch.fpn,
                             model_cfg.arch.head)

    checkpoint = load(weights, map_location='cpu')['state_dict']
    ckpt = {k.replace('model.', ''): v for k, v in checkpoint.items()}
    model.load_state_dict(ckpt)

    return model
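A minimal usage sketch for the helper above; the config path and checkpoint name are placeholders, not files from the original snippet:

model = create_nanodet("config/nanodet-m.yml", "nanodet_m.ckpt")  # hypothetical paths
model.eval()  # OneStageDetector is a torch.nn.Module; switch to inference mode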
Example #2
    def __init__(self):
        self.my_name = None
        self.my_name_alias = None
        self.my_tutorial_ids = list()
        self.other_tutorial_ids = list()
        self.tutorials = dict()
        self.students = dict()
        self.exported_students = list()
        self.imported_students = list()
        self.scores = dict()
        self.account_data = mixin_passwords(load_config("account_data.json"))
        self.config = load_config("config.json")
        storage_config = self.config.storage
        self.physical_storage = PhysicalDataStorage(storage_config)
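Note that the snippet reads `self.config.storage` with attribute access, so its `load_config` must return an object rather than a plain dict. A minimal sketch of such a helper, assuming flat JSON files; `SimpleNamespace` is just one common way to get dot access:

import json
from types import SimpleNamespace

def load_config(path):
    # Parse JSON and expose every mapping as attributes (config.storage, ...).
    with open(path) as fh:
        return json.load(fh, object_hook=lambda d: SimpleNamespace(**d))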
Example #3
def create_nanodet(cfg_path, weights):
    # As in Example #1: `cfg` and `load_config` are assumed to be nanodet.util
    # helpers and `load` is torch.load; NanoDetPlus is built from the arch config.
    load_config(cfg, cfg_path)
    model_cfg = cfg.model
    model_cfg.arch.backbone.pop("name")
    model_cfg.arch.fpn.pop("name")
    model_cfg.arch.head.pop("name")
    model_cfg.arch.pop("name")

    model = NanoDetPlus(**model_cfg.arch)

    checkpoint = load(weights, map_location='cpu')['state_dict']
    ckpt = {k.replace('model.', ''): v for k, v in checkpoint.items()}

    model.load_state_dict(ckpt, strict=False)

    return model
Example #4
def pairwise_stats():
    cfg = load_config()
    dataset = create_dataset(cfg)
    dataset.set_shuffle(True)
    dataset.set_pairwise_stats_collect(True)

    num_images = dataset.num_images
    all_pairwise_differences = {}

    if cfg.mirror:
        num_images *= 2

    for k in range(num_images):
        print('processing image {}/{}'.format(k, num_images-1))

        batch = dataset.next_batch()
        batch_stats = batch[Batch.data_item].pairwise_stats
        for joint_pair in batch_stats:
            if joint_pair not in all_pairwise_differences:
                all_pairwise_differences[joint_pair] = []
            all_pairwise_differences[joint_pair] += batch_stats[joint_pair]

    stats = {}
    for joint_pair in all_pairwise_differences:
        stats[joint_pair] = {}
        stats[joint_pair]["mean"] = np.mean(all_pairwise_differences[joint_pair], axis=0)
        stats[joint_pair]["std"] = np.std(all_pairwise_differences[joint_pair], axis=0)

    save_stats(stats, cfg)
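For reference, the aggregation above reduces each joint pair's list of 2-D offsets column-wise; a self-contained sketch with made-up offsets:

import numpy as np

diffs = [np.array([4.0, -2.0]), np.array([6.0, 0.0])]  # two (dx, dy) samples
print(np.mean(diffs, axis=0))  # [ 5. -1.]
print(np.std(diffs, axis=0))   # [1. 1.]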
Example #5
    def __new__(cls):
        if InteractiveDataStorage.__instance is None:
            InteractiveDataStorage.__instance = object.__new__(cls)
        # Note: the assignments below sit outside the `if`, so every call
        # re-initializes the singleton's stored state.
        InteractiveDataStorage.__instance.my_name = None
        InteractiveDataStorage.__instance.my_name_alias = None
        InteractiveDataStorage.__instance.my_tutorial_ids = list()
        InteractiveDataStorage.__instance.other_tutorial_ids = list()
        InteractiveDataStorage.__instance.tutorials = dict()
        InteractiveDataStorage.__instance.students = dict()
        InteractiveDataStorage.__instance.exported_students = list()
        InteractiveDataStorage.__instance.imported_students = list()
        InteractiveDataStorage.__instance.scores = dict()
        InteractiveDataStorage.__instance.account_data = load_config("account_data.json")
        InteractiveDataStorage.__instance.config = load_config("config.json")
        storage_config = InteractiveDataStorage.__instance.config.storage
        InteractiveDataStorage.__instance.physical_storage = PhysicalDataStorage(storage_config)

        return InteractiveDataStorage.__instance
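A quick check of the singleton contract above, assuming the class is importable as written and both JSON config files exist:

a = InteractiveDataStorage()
b = InteractiveDataStorage()
assert a is b            # both names point at the one shared instance
assert b.students == {}  # the second call re-initialized the state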
Example #6
    def __init__(self, log_flag=None, debug_flag=None, debug_dir=None, dbpwd=None):

        self.logger = App.set_up_logger(log_flag, debug_flag)
        self.debug_enabled = debug_flag
        self.debug_dir = debug_dir

        self.config = load_config('conf.ini')
        dsn_name = self.get_config('DSN', 'Name')
        self.db_connection = new_connection(dsn_name, dbpwd)
Example #7
def display_dataset():
    logging.basicConfig(level=logging.DEBUG)

    cfg = load_config()
    dataset = dataset_create(cfg)
    dataset.set_shuffle(False)

    while True:
        batch = dataset.next_batch()

        for frame_id in range(1):
            img = batch[Batch.inputs][frame_id, :, :, :]
            img = np.squeeze(img).astype('uint8')

            scmap = batch[Batch.part_score_targets][frame_id, :, :, :]
            scmap = np.squeeze(scmap)

            # scmask = batch[Batch.part_score_weights]
            # if scmask.size > 1:
            #     scmask = np.squeeze(scmask).astype('uint8')
            # else:
            #     scmask = np.zeros(img.shape)

            subplot_height = 4
            subplot_width = 5
            num_plots = subplot_width * subplot_height
            f, axarr = plt.subplots(subplot_height, subplot_width)

            for j in range(num_plots):
                plot_j = j // subplot_width
                plot_i = j % subplot_width

                curr_plot = axarr[plot_j, plot_i]
                curr_plot.axis('off')

                if j >= cfg.num_joints:
                    continue

                scmap_part = scmap[:, :, j]
                # scipy.misc.imresize was removed in SciPy 1.3; on newer
                # stacks, cv2.resize or PIL can do this nearest upsampling.
                scmap_part = imresize(scmap_part, 8.0, interp='nearest')
                scmap_part = np.lib.pad(scmap_part, ((4, 0), (4, 0)),
                                        'minimum')

                curr_plot.set_title("{}".format(j + 1))
                curr_plot.imshow(img)
                # Axes.hold was removed in Matplotlib 2.0; a second imshow
                # call overlays on the same axes without it.
                curr_plot.imshow(scmap_part, alpha=.5)

        # figure(0)
        # plt.imshow(np.sum(scmap, axis=2))
        # plt.figure(100)
        # plt.imshow(img)
        # plt.figure(2)
        # plt.imshow(scmask)
        plt.show()
        plt.waitforbuttonpress()
Example #8
def main(args):

    model_class = get_model(args[1])
    dataset = dataset_by_name(args[2])

    params = get_params(args[1], dataset.name)
    load_config(dataset.name, optional=True)

    with tf.variable_scope('data'):
        tf.logging.debug('Load data')
        train_data = to_tf_dataset(dataset, is_train=True, batch_size=cfg.batch_size,
                                   aug_strength=cfg.data_aug, 
                                   aug_prob=cfg.aug_prob,
                                   aug_flip=cfg.aug_flip)
        test_data = to_tf_dataset(dataset, is_train=False, batch_size=cfg.batch_size)

        iterator = tf.data.Iterator.from_structure(train_data.output_types,
                                                   train_data.output_shapes)
        img, label = iterator.get_next()

        tf.logging.debug('Creating iterator initializer')
        train_init = iterator.make_initializer(train_data)
        test_init = iterator.make_initializer(test_data)

    tf.train.create_global_step()
    create_epoch()

    tf.logging.debug('Creating model graph')
    model = model_class(img=img, label=label, num_classes=dataset.num_classes, **params)

    ckpt_dir = get_dir(cfg.ckpt_dir, dataset.name, model.name)
    log_dir = get_dir(cfg.log_dir, dataset.name, model.name)

    if cfg.stop_before_session:
        exit()

    tf.logging.debug('Starting session')
    with tf.Session() as sess:
        try:
            train_with_test(sess, model, train_init, test_init, ckpt_dir, log_dir)
        except KeyboardInterrupt:
            print("Manual interrupt occured")
Example #9
def test_net(visualise, cache_scoremaps):
    logging.basicConfig(level=logging.INFO)

    cfg = load_config()
    dataset = create_dataset(cfg)
    dataset.set_shuffle(False)
    dataset.set_test_mode(True)

    sess, inputs, outputs = setup_pose_prediction(cfg)

    if cache_scoremaps:
        out_dir = cfg.scoremap_dir
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    num_images = dataset.num_images
    # np.object was removed in NumPy 1.24; the built-in object is equivalent.
    predictions = np.zeros((num_images,), dtype=object)

    for k in range(num_images):
        print('processing image {}/{}'.format(k, num_images - 1))

        batch = dataset.next_batch()

        outputs_np = sess.run(outputs, feed_dict={inputs: batch[Batch.inputs]})

        scmap, locref, pairwise_diff = extract_cnn_output(outputs_np, cfg)

        pose = argmax_pose_predict(scmap, locref, cfg.stride)

        pose_refscale = np.copy(pose)
        pose_refscale[:, 0:2] /= cfg.global_scale
        predictions[k] = pose_refscale

        if visualise:
            img = np.squeeze(batch[Batch.inputs]).astype('uint8')
            visualize.show_heatmaps(cfg, img, scmap, pose)
            visualize.waitforbuttonpress()

        if cache_scoremaps:
            base = os.path.basename(batch[Batch.data_item].im_path)
            raw_name = os.path.splitext(base)[0]
            out_fn = os.path.join(out_dir, raw_name + '.mat')
            scipy.io.savemat(out_fn,
                             mdict={'scoremaps': scmap.astype('float32')})

            out_fn = os.path.join(out_dir, raw_name + '_locreg' + '.mat')
            if cfg.location_refinement:
                scipy.io.savemat(
                    out_fn, mdict={'locreg_pred': locref.astype('float32')})

    scipy.io.savemat('predictions.mat', mdict={'joints': predictions})

    sess.close()
Example #10
async def main(event_loop):
    args = parse_args()
    config_env_overrides = {
        "is_prod": not args.dev,
        "token": args.token,
        "yandex_dev_api_key": args.yandex_dev_api_key,
    }
    conf = config.load_config("config", "env.yaml", config_env_overrides)

    log.setup_logging(conf)

    telepot.aio.api.set_proxy(conf["core"]["proxy"])

    knowledge_base = MongoKnowledgeBase.build(
        host=conf["mongo_knowledge_base"]["db_host"],
        port=conf["mongo_knowledge_base"]["db_port"],
        db_name=conf["mongo_knowledge_base"]["db_name"],
        db_collection=conf["mongo_knowledge_base"]["db_collection"],
    )

    intelligence_registry = chat_intelligence.IntelligenceRegistry(
        core_constructor=functools.partial(
            intelligence_core_factory.build,
            event_loop=event_loop,
            knowledge_base=knowledge_base,
            http_session=aiohttp.ClientSession(),
            user_agent=conf["core"]["user_agent"],
            markov_chain_worker=concurrent.futures.ThreadPoolExecutor(
                max_workers=5),
            conf=conf,
        ))

    bot_accessor = BotAccessor()
    bot_accessor.set(
        bot_factory.build(
            bot_token=conf["token"],
            bot_name=conf["bot_name"],
            bot_accessor=bot_accessor,
            event_loop=event_loop,
            intelligence_registry=intelligence_registry,
            knowledge_base=knowledge_base,
            telepot_http_timeout=conf["telepot"]["http_timeout"],
            conf=conf,
        ))

    await MessageLoop(bot_accessor()).run_forever()
Example #11
def main():
    print("===== dotfiles =====")
    config = load_config()

    for path in filter(Path.is_file, DOTFILES_PATH.rglob("*")):
        if any(str(path).endswith(ext) for ext in BINARY_FILETYPES):
            entry = BinaryFile(path)
        else:
            entry = TemplateFile(path, config)

        entry.run()

    if "gsettings" in config:
        for schema, records in config["gsettings"].items():
            print(f"\n===== gsettings: {schema} =====")

            for key, val in records.items():
                GSettings(schema, key, val).run()
Example #12
def main():
    # Create main logger
    logger = get_logger('UNet3DTrainer')

    # Load and log experiment configuration
    config = load_config()
    logger.info(config)

    manual_seed = config.get('manual_seed', None)
    if manual_seed is not None:
        logger.info(f'Seed the RNG for all devices with {manual_seed}')
        torch.manual_seed(manual_seed)
        # see https://pytorch.org/docs/stable/notes/randomness.html
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Create the model
    model = get_model(config)
    # put the model on GPUs
    logger.info(f"Sending the model to '{config['device']}'")
    model = model.to(config['device'])
    # Log the number of learnable parameters
    logger.info(f'Number of learnable params {get_number_of_learnable_parameters(model)}')

    # Create loss criterion
    loss_criterion = torch.nn.BCELoss(reduction='mean')
    # Create evaluation metric
    eval_criterion = loss_criterion

    # Create data loaders
    loaders = get_train_loaders(config)

    # Create the optimizer
    optimizer = _create_optimizer(config, model)

    # Create learning rate adjustment strategy
    lr_scheduler = _create_lr_scheduler(config, optimizer)

    # Create model trainer
    trainer = _create_trainer(config, model=model, optimizer=optimizer, lr_scheduler=lr_scheduler,
                              loss_criterion=loss_criterion, eval_criterion=eval_criterion, loaders=loaders,
                              logger=logger)
    # Start training
    trainer.fit()
Example #13
def mnist(ntrain=60000, ntest=10000, onehot=True):
    datasets_dir = load_config()['datafiles']['directory']
    data_dir = os.path.join(datasets_dir, 'mnist/')
    # IDX files are binary: open them in 'rb' so np.fromfile behaves the
    # same on every platform.
    fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trX = loaded[16:].reshape((60000, 28 * 28)).astype(float)

    fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trY = loaded[8:].reshape(60000)

    fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teX = loaded[16:].reshape((10000, 28 * 28)).astype(float)

    fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teY = loaded[8:].reshape(10000)

    trX = trX/255.
    teX = teX/255.

    trX = trX[:ntrain]
    trY = trY[:ntrain]

    teX = teX[:ntest]
    teY = teY[:ntest]

    if onehot:
        trY = one_hot(trY, 10)
        teY = one_hot(teY, 10)
    else:
        trY = np.asarray(trY)
        teY = np.asarray(teY)

    return trX, teX, trY, teY
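Expected output shapes from the loader above, assuming the raw MNIST IDX files sit under the configured directory:

trX, teX, trY, teY = mnist(ntrain=1000, ntest=100, onehot=True)
print(trX.shape, trY.shape)  # (1000, 784) (1000, 10)
print(teX.shape, teY.shape)  # (100, 784) (100, 10)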
Example #14
        # draw LHip -> LKnee (11 -> 13)
        if all(pose[11]) and all(pose[13]):
            draw.line([tuple(pose[11]), tuple(pose[13])], width=thickness, fill=(51, 51, 204))
        # draw LKnee -> LFoot (13 -> 15)
        if all(pose[13]) and all(pose[15]):
            draw.line([tuple(pose[13]), tuple(pose[15])], width=thickness, fill=(51, 51, 204))

    return image, draw_person


parser = argparse.ArgumentParser(description="Tensorflow Pose Estimation Example")
parser.add_argument("--image", type=str, default = "demo/image_multi.png", help="image file name")
args = parser.parse_args()

cfg = load_config("demo/pose_cfg_multi.yaml")

dataset = create_dataset(cfg)

sm = SpatialModel(cfg)
sm.load()

#draw_multi = PersonDraw()

# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)

# Read image from file
file_name = args.image
#image = imread(file_name, mode='RGB')
image = Image.open(file_name).convert('RGB')
Example #15
import typing

import cv2
import numpy
import scipy.stats

from tqdm import tqdm

from util.config import load_config
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input

cfg = load_config('demo/pose_cfg.yaml')
sess, inputs, outputs = predict.setup_pose_prediction(cfg)


def angle_between(
    p1: typing.Tuple[float, float],
    p2: typing.Tuple[float, float],
) -> float:
    ang1 = numpy.arctan2(*p1[::-1])
    ang2 = numpy.arctan2(*p2[::-1])
    return numpy.rad2deg((ang1 - ang2) % (2 * numpy.pi))


def get_video_frames(source) -> typing.Generator[numpy.ndarray, None, None]:
    # Yield frames until the source stops producing them.
    while True:
        ret, frame = source.read()
        if not ret:
            break
        yield frame
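A usage sketch for the frame generator above (completed with the `yield` it was missing); the video path is a placeholder:

capture = cv2.VideoCapture("input.mp4")  # hypothetical source file
for frame in get_video_frames(capture):
    print(frame.shape)  # e.g. (height, width, 3) BGR arrays
capture.release()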
Example #16
def test_net(visualise, cache_scoremaps, development):
    logging.basicConfig(level=logging.INFO)

    cfg = load_config()
    dataset = create_dataset(cfg)
    dataset.set_shuffle(False)

    sm = SpatialModel(cfg)
    sm.load()

    draw_multi = PersonDraw()

    from_cache = "cached_scoremaps" in cfg
    if not from_cache:
        sess, inputs, outputs = setup_pose_prediction(cfg)

    if cache_scoremaps:
        out_dir = cfg.scoremap_dir
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    pairwise_stats = dataset.pairwise_stats
    num_images = dataset.num_images if not development else min(
        10, dataset.num_images)
    coco_results = []

    for k in range(num_images):
        print('processing image {}/{}'.format(k, num_images - 1))

        batch = dataset.next_batch()

        cache_name = "{}.mat".format(batch[Batch.data_item].coco_id)

        if not from_cache:
            outputs_np = sess.run(outputs,
                                  feed_dict={inputs: batch[Batch.inputs]})
            scmap, locref, pairwise_diff = extract_cnn_output(
                outputs_np, cfg, pairwise_stats)

            if cache_scoremaps:
                if visualise:
                    img = np.squeeze(batch[Batch.inputs]).astype('uint8')
                    pose = argmax_pose_predict(scmap, locref, cfg.stride)
                    arrows = argmax_arrows_predict(scmap, locref,
                                                   pairwise_diff, cfg.stride)
                    visualize.show_arrows(cfg, img, pose, arrows)
                    visualize.waitforbuttonpress()
                    continue

                out_fn = os.path.join(out_dir, cache_name)
                # Avoid shadowing the built-in `dict`.
                out_dict = {
                    'scoremaps': scmap.astype('float32'),
                    'locreg_pred': locref.astype('float32'),
                    'pairwise_diff': pairwise_diff.astype('float32')
                }
                scipy.io.savemat(out_fn, mdict=out_dict)
                continue
        else:
            # cache_name = '1.mat'
            full_fn = os.path.join(cfg.cached_scoremaps, cache_name)
            mlab = scipy.io.loadmat(full_fn)
            scmap = mlab["scoremaps"]
            locref = mlab["locreg_pred"]
            pairwise_diff = mlab["pairwise_diff"]

        detections = extract_detections(cfg, scmap, locref, pairwise_diff)
        unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(
            sm, detections)
        person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array,
                                                     pos_array)

        if visualise:
            img = np.squeeze(batch[Batch.inputs]).astype('uint8')
            # visualize.show_heatmaps(cfg, img, scmap, pose)
            """
            # visualize part detections after NMS
            visim_dets = visualize_detections(cfg, img, detections)
            plt.imshow(visim_dets)
            plt.show()
            visualize.waitforbuttonpress()
            """

            visim_multi = img.copy()
            draw_multi.draw(visim_multi, dataset, person_conf_multi)

            plt.imshow(visim_multi)
            plt.show()
            visualize.waitforbuttonpress()

        if cfg.use_gt_segm:
            coco_img_results = pose_predict_with_gt_segm(
                scmap, locref, cfg.stride, batch[Batch.data_item].gt_segm,
                batch[Batch.data_item].coco_id)
            coco_results += coco_img_results
            if len(coco_img_results):
                dataset.visualize_coco(coco_img_results,
                                       batch[Batch.data_item].visibilities)

    if cfg.use_gt_segm:
        with open('predictions_with_segm.json', 'w') as outfile:
            json.dump(coco_results, outfile)

    sess.close()
Example #17
import os
import sys

sys.path.append(os.path.dirname(__file__) + "/../")

# scipy.misc.imread was removed in SciPy 1.2; with newer SciPy, use
# imageio.imread instead.
from scipy.misc import imread

from nnet import predict
from util import visualize
from util.config import load_config
from dataset.pose_dataset import data_to_input

cfg = load_config("demo/pose_cfg.yaml")

# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)

# Read image from file
file_name = "demo/image.png"
image = imread(file_name, mode='RGB')

image_batch = data_to_input(image)

# Compute prediction with the CNN
outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
scmap, locref, _ = predict.extract_cnn_output(outputs_np, cfg)

# Extract maximum scoring location from the heatmap, assume 1 person
pose = predict.argmax_pose_predict(scmap, locref, cfg.stride)
print(pose)
print(type(pose))
Example #18
def load_all(config, args, mode):
    features_path = get_results_path(config.results_path, 'features',
                                     args.prefix, mode)
    image_patches = np.load(features_path / 'image_patches.npy')
    feature_matrix = np.load(features_path / 'feature_matrix.npy')
    labels = np.load(features_path / 'labels.npy')
    return image_patches, feature_matrix, labels


if __name__ == '__main__':
    logger = get_logger('visual_histograms')
    set_excepthook(logger)

    args = parse_arguments()
    config = load_config(args.config)
    set_seed(config.seed)
    model_type = 'bow' if args.bow else 'fv'
    CLUSTERS_NUM = 10

    # train_image_patches, train_feature_matrix, train_labels = load_all(config, args, 'train')
    test_image_patches, test_feature_matrix, test_labels = load_all(
        config, args, 'test')
    model_path = get_results_path(config.results_path, model_type, args.prefix,
                                  str(CLUSTERS_NUM))
    model = joblib.load(model_path / 'best_model.pkl').best_estimator_
    transformer_name = 'bag_of_words' if args.bow else 'fisher_vector'
    transformer = model.named_steps[transformer_name]

    # train_points = transformer.transform(train_feature_matrix)
    test_points = transformer.transform(test_feature_matrix)
Example #19
import cPickle  # Python 2 only; on Python 3 this is the built-in pickle
import traceback
from collections import Counter
from multiprocessing import Pool

import tqdm
import fire
import h5py
import numpy as np
from keras.utils.np_utils import to_categorical

from misc import get_logger, Option

from util.config import load_config

opt = load_config('./config/myconfig.json')

TRAIN_DATA_FILE_LIST = ['train.chunk.0%d' % i for i in range(1, 10)]
DEV_DATA_FILE_LIST = ['dev.chunk.01']
TEST_DATA_FILE_LIST = ['test.chunk.01', 'test.chunk.02']

TRAIN_DATA_LIST = [
    "%s/%s" % (opt.data_dir, filename) for filename in TRAIN_DATA_FILE_LIST
]
DEV_DATA_LIST = [
    "%s/%s" % (opt.data_dir, filename) for filename in DEV_DATA_FILE_LIST
]
TEST_DATA_LIST = [
    "%s/%s" % (opt.data_dir, filename) for filename in TEST_DATA_FILE_LIST
]
Example #20
import os
import sys

from PIL import Image

# Disabling cuDNN autotune only works through the environment, and it must
# be set before TensorFlow is imported.
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "0"

sys.path.insert(1, 'pose_tensorflow')

from util.config import load_config
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input

os.chdir("pose_tensorflow")
cfg = {}
cfg['cfg'] = load_config("demo/pose_cfg.yaml")
cfg['sess'], cfg['inputs'], cfg['outputs'] = predict.setup_pose_prediction(
    cfg['cfg'])
os.chdir("..")


def resize_image(img: Image):
    basewidth = 300

    wpercent = basewidth / img.size[0]
    hsize = int(img.size[1] * wpercent)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img = img.resize((basewidth, hsize), Image.LANCZOS)

    return img


def get_pose(image, d=cfg):
Example #21
def eval_init(cfg):
    # Signature reconstructed from the call in __main__ below; the full
    # source presumably derives dataset, dataset_ann and dataset_phase
    # from cfg.
    threshold = 0

    # initialize cocoGT api
    annFile = '%s/annotations/%s_%s.json' % (dataset, dataset_ann,
                                             dataset_phase)
    cocoGT = COCO(annFile)

    # initialize cocoPred api
    inFile = "predictions_with_segm.json"
    predFile = apply_threhsold(inFile, threshold)
    cocoPred = cocoGT.loadRes(predFile)

    return cocoGT, cocoPred


def eval_mscoco_with_segm(cocoGT, cocoPred):
    # running evaluation
    cocoEval = COCOeval(cocoGT, cocoPred, "keypoints")
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    args, unparsed = parser.parse_known_args()

    cfg = load_config()

    cocoGT, cocoPred = eval_init(cfg)
    eval_mscoco_with_segm(cocoGT, cocoPred)
Example #22
import matplotlib
from pandas import DataFrame

matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from util.config import load_config
from nnet import predict
from sklearn.model_selection import train_test_split
from dataset.pose_dataset import data_to_input
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc
from util import visualize
import pickle

# import resize_images

cfg = load_config("pose_cfg.yaml")

# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)

# Read images from a path
pose_image_resources_downward = "../pose_images/Database_Resized/DownwardDog_mixed/*"
pose_image_resources_plank = "../pose_images/Database_Resized/Plank_mixed/*"
pose_image_resources_tree = "../pose_images/Database_Resized/Tree_mixed/*"
pose_image_resources_warrior = "../pose_images/Database_Resized/WarriorII_mixed/*"

pose_image_resources_all_right = "../pose_images/Database_Resized/all_poses_right/*"
# Uncomment one of the paths below and comment the ones above during
# development to reduce execution time:
#
# pose_image_resources = "../pose_images/acc/*.jpeg"  # 26 samples 6 testing set --> Score 0,767 (n_estimators=40, max_depth=20) 0,916
# pose_image_resources ="../pose_images/all_tree/*.jpeg"
Example #23
def init():
    global api, config
    config = load_config()
    api = load_api(config)
Example #24
def train():
    setup_logging()

    cfg = load_config()
    dataset = create_dataset(cfg)

    batch_spec = get_batch_spec(cfg)
    batch, enqueue_op, placeholders = setup_preloading(batch_spec)

    losses = pose_net(cfg).train(batch)
    total_loss = losses['total_loss']

    for k, t in losses.items():
        tf.summary.scalar(k, t)
    merged_summaries = tf.summary.merge_all()

    variables_to_restore = slim.get_variables_to_restore(include=["resnet_v1"])
    restorer = tf.train.Saver(variables_to_restore)
    saver = tf.train.Saver(max_to_keep=5)

    sess = tf.Session()

    coord, thread = start_preloading(sess, enqueue_op, dataset, placeholders)

    train_writer = tf.summary.FileWriter(cfg.log_dir, sess.graph)

    learning_rate, train_op = get_optimizer(total_loss, cfg)

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    # Restore variables from disk.
    restorer.restore(sess, cfg.init_weights)

    max_iter = int(cfg.multi_step[-1][1])

    display_iters = cfg.display_iters
    cum_loss = 0.0
    lr_gen = LearningRate(cfg)

    for it in range(max_iter+1):
        current_lr = lr_gen.get_lr(it)
        [_, loss_val, summary] = sess.run([train_op, total_loss, merged_summaries],
                                          feed_dict={learning_rate: current_lr})
        cum_loss += loss_val
        train_writer.add_summary(summary, it)

        if it % display_iters == 0:
            average_loss = cum_loss / display_iters
            cum_loss = 0.0
            logging.info("iteration: {} loss: {} lr: {}"
                         .format(it, "{0:.4f}".format(average_loss), current_lr))

        # Save snapshot
        if (it % cfg.save_iters == 0 and it != 0) or it == max_iter:
            model_name = cfg.snapshot_prefix
            saver.save(sess, model_name, global_step=it)

    sess.close()
    coord.request_stop()
    coord.join([thread])
Example #25
#!/usr/bin/env python
# -*- coding: utf-8 -*-

""" main.py


"""

import os
import random
import sys

import webapp2


APP_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, APP_ROOT_DIR + '/lib/')

from util.config import load_config
load_config(APP_ROOT_DIR + "/config.json")

random.seed()

application = webapp2.WSGIApplication([
    ('/__task__/fetch', 'handler.fetch_handler.FetchHandler'),
    ('/__task__/post', 'handler.post_handler.PostHandler'),
    ('/__task__/follow', 'handler.follow_handler.FollowHandler'),
    ])
Example #26
def init():
    global api, config, db
    config = load_config()
    api = load_api(config)
    db = load_db(config["neo4j"])