# Example 1
# 0
def load_cfg():
    """Load this WebUI plugin's configuration from its plugin.cfg file.

    Populates the module-level ``params`` dict and coerces
    ``elts_per_page`` to ``int``.

    Returns:
        True when the configuration was loaded, False when the file
        could not be read or parsed (a warning is logged).
    """
    global params

    import os
    from shinken.log import logger
    from config_parser import config_parser

    plugin_name = os.path.splitext(os.path.basename(__file__))[0]
    # Resolve the configuration path before entering the try block so the
    # except handler can always reference `configuration_file` safely.
    currentdir = os.path.dirname(os.path.realpath(__file__))
    configuration_file = os.path.join(currentdir, 'plugin.cfg')
    try:
        logger.debug("Plugin configuration file: %s" % (configuration_file))
        scp = config_parser('#', '=')
        params = scp.parse_config(configuration_file)

        # The parser yields strings; callers expect an integer page size.
        params['elts_per_page'] = int(params['elts_per_page'])

        logger.debug("WebUI plugin '%s', configuration loaded." %
                     (plugin_name))
        logger.debug("Plugin configuration, elts_per_page: %d" %
                     (params['elts_per_page']))

        return True
    except Exception as exp:
        # `except ... as` works on Python 2.6+ and 3.x, unlike the old
        # comma form which is a SyntaxError on Python 3.
        logger.warning(
            "WebUI plugin '%s', configuration file (%s) not available: %s" %
            (plugin_name, configuration_file, str(exp)))
        return False
# Example 2
# 0
def main():
    """Entry point: configure logging, parse the config, run quantization."""
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO,
                    stream=sys.stdout)
    try:
        # Build the CLI configuration and select the execution backend.
        cli_config, executor_type = build_argparser()
        cfg_parser = config_parser(cli_config)
        q_params = cfg_parser.parse()

        log.info('Start quantization on {}!'.format(executor_type))
        quantization(executor_type, q_params, log)
        log.info('End quantization!')

        # Release any temporary resources held by the parser.
        cfg_parser.clean()
        log.info('Work is done!')
    except Exception as err:
        # Any failure is logged and turned into a non-zero exit status.
        log.error(str(err))
        sys.exit(1)
# Example 3
# 0
def load_cfg():
    """Load this WebUI plugin's configuration from its plugin.cfg file.

    Populates the module-level ``params`` dict.

    Returns:
        True when the configuration was loaded, False when the file
        could not be read or parsed (a warning is logged).
    """
    global params

    import os
    from config_parser import config_parser
    from shinken.log import logger

    plugin_name = os.path.splitext(os.path.basename(__file__))[0]
    # Resolve the configuration path before entering the try block so the
    # except handler can always reference `configuration_file` safely.
    currentdir = os.path.dirname(os.path.realpath(__file__))
    configuration_file = os.path.join(currentdir, 'plugin.cfg')
    try:
        logger.debug("Plugin configuration file: %s" % (configuration_file))
        scp = config_parser('#', '=')
        params = scp.parse_config(configuration_file)

        logger.debug("WebUI plugin '%s', configuration loaded." % (plugin_name))
        return True
    except Exception as exp:
        # `except ... as` works on Python 2.6+ and 3.x, unlike the old
        # comma form which is a SyntaxError on Python 3.
        logger.warning("WebUI plugin '%s', configuration file (%s) not available: %s" % (plugin_name, configuration_file, str(exp)))
        return False
# Example 4
# 0
from shinken.log import logger

### Will be populated by the UI with its own value
app = None

# Plugin parameters, loaded from the configuration file below.  Left as an
# empty dict when loading fails so later lookups degrade gracefully.
params = {}

import os, sys
from config_parser import config_parser

plugin_name = os.path.splitext(os.path.basename(__file__))[0]
# Resolve the configuration path before entering the try block so the
# except handler can always reference `configuration_file` safely.
currentdir = os.path.dirname(os.path.realpath(__file__))
configuration_file = os.path.join(currentdir, 'plugin.cfg')
try:
    logger.debug("Plugin configuration file: %s" % (configuration_file))
    scp = config_parser('#', '=')
    params = scp.parse_config(configuration_file)

    # The parser yields strings; coerce to the numeric types callers expect.
    params['default_Lat'] = float(params['default_Lat'])
    params['default_Lng'] = float(params['default_Lng'])
    params['default_zoom'] = int(params['default_zoom'])

    logger.debug("WebUI plugin '%s', configuration loaded." % (plugin_name))
    logger.debug("Plugin configuration, default position: %s / %s" % (params['default_Lat'], params['default_Lng']))
    logger.debug("Plugin configuration, default zoom level: %d" % (params['default_zoom']))
except Exception as exp:
    # `except ... as` works on Python 2.6+ and 3.x, unlike the old comma
    # form which is a SyntaxError on Python 3.
    logger.warning("WebUI plugin '%s', configuration file (%s) not available: %s" % (plugin_name, configuration_file, str(exp)))


def checkauth():
    # Ask the WebUI application (injected into the module-level `app`) for
    # the currently authenticated user.
    # NOTE(review): the body appears truncated in this excerpt -- `user` is
    # never used or returned; presumably the full version redirects
    # unauthenticated users.  Confirm against the original plugin source.
    user = app.get_user_auth()
# Example 5
# 0
def train():
    """Train a NeRF-style model selected by the ``model_type`` CLI argument.

    Steps: parse the CLI configuration, build the dataset/transforms for
    the chosen model type, wrap them in data loaders, build the coarse and
    fine render networks, then dispatch to the matching solver, train, and
    save the run artifacts.
    """
    parser = config_parser()
    args = parser.parse_args()
    # Select the computation device once; solvers/datasets read it from args.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    args.default_device = device
    # Fail fast on an unknown model type rather than silently skipping every
    # branch below.
    if args.model_type not in [
            "nerf", "smpl_nerf", "append_to_nerf", "smpl", "warp",
            'vertex_sphere', "smpl_estimator", "original_nerf",
            'dummy_dynamic', 'image_wise_dynamic',
            "append_vertex_locations_to_nerf", 'append_smpl_params'
    ]:
        raise Exception("The model type ", args.model_type, " does not exist.")

    # Per-sample pipeline shared by most dataset types: normalize RGB,
    # sample coarse points along each ray, convert to tensors.
    transform = transforms.Compose([
        NormalizeRGB(),
        CoarseSampling(args.near, args.far, args.number_coarse_samples),
        ToTensor()
    ])

    train_dir = os.path.join(args.dataset_dir, 'train')
    val_dir = os.path.join(args.dataset_dir, 'val')
    # ----- dataset construction: one branch per model type -----
    if args.model_type == "nerf":
        train_data = RaysFromImagesDataset(
            train_dir, os.path.join(train_dir, 'transforms.json'), transform)
        val_data = RaysFromImagesDataset(
            val_dir, os.path.join(val_dir, 'transforms.json'), transform)
    elif args.model_type == "smpl" or args.model_type == "warp":
        train_data = SmplDataset(train_dir,
                                 os.path.join(train_dir, 'transforms.json'),
                                 args,
                                 transform=NormalizeRGB())
        val_data = SmplDataset(val_dir,
                               os.path.join(val_dir, 'transforms.json'),
                               args,
                               transform=NormalizeRGB())
    elif args.model_type == "smpl_nerf" or args.model_type == "append_to_nerf" or args.model_type == "append_smpl_params":
        train_data = SmplNerfDataset(
            train_dir, os.path.join(train_dir, 'transforms.json'), transform)
        val_data = SmplNerfDataset(val_dir,
                                   os.path.join(val_dir, 'transforms.json'),
                                   transform)
    elif args.model_type == "vertex_sphere":
        train_data = VertexSphereDataset(
            train_dir, os.path.join(train_dir, 'transforms.json'), args)
        val_data = VertexSphereDataset(
            val_dir, os.path.join(val_dir, 'transforms.json'), args)
    elif args.model_type == "smpl_estimator":
        # The estimator works on whole images, so only RGB normalization is
        # applied (no ray sampling).
        transform = NormalizeRGBImage()
        train_data = SmplEstimatorDataset(
            train_dir, os.path.join(train_dir, 'transforms.json'),
            args.vertex_sphere_radius, transform)
        val_data = SmplEstimatorDataset(
            val_dir, os.path.join(val_dir, 'transforms.json'),
            args.vertex_sphere_radius, transform)
    elif args.model_type == "original_nerf":
        # Original NeRF layout keeps separate transforms_{train,val}.json in
        # the dataset root instead of train/ and val/ subdirectories.
        train_data = OriginalNerfDataset(
            args.dataset_dir,
            os.path.join(args.dataset_dir, 'transforms_train.json'), transform)
        val_data = OriginalNerfDataset(
            args.dataset_dir,
            os.path.join(args.dataset_dir, 'transforms_val.json'), transform)
    elif args.model_type == "dummy_dynamic":
        train_data = DummyDynamicDataset(
            train_dir, os.path.join(train_dir, 'transforms.json'), transform)
        val_data = DummyDynamicDataset(
            val_dir, os.path.join(val_dir, 'transforms.json'), transform)
    elif args.model_type == "append_vertex_locations_to_nerf":
        train_data = DummyDynamicDataset(
            train_dir, os.path.join(train_dir, 'transforms.json'), transform)
        val_data = DummyDynamicDataset(
            val_dir, os.path.join(val_dir, 'transforms.json'), transform)
    elif args.model_type == 'image_wise_dynamic':
        # Fixed canonical pose tensors for the dummy per-image estimator.
        # NOTE(review): the split sizes (38/2/27) presumably partition the
        # SMPL pose vector -- confirm against DummyImageWiseEstimator.
        canonical_pose1 = torch.zeros(38).view(1, -1)
        canonical_pose2 = torch.zeros(2).view(1, -1)
        canonical_pose3 = torch.zeros(27).view(1, -1)
        arm_angle_l = torch.tensor([np.deg2rad(10)]).float().view(1, -1)
        arm_angle_r = torch.tensor([np.deg2rad(10)]).float().view(1, -1)
        smpl_estimator = DummyImageWiseEstimator(canonical_pose1,
                                                 canonical_pose2,
                                                 canonical_pose3, arm_angle_l,
                                                 arm_angle_r,
                                                 torch.zeros(10).view(1, -1),
                                                 torch.zeros(69).view(1, -1))
        train_data = ImageWiseDataset(
            train_dir, os.path.join(train_dir, 'transforms.json'),
            smpl_estimator, transform, args)
        val_data = ImageWiseDataset(val_dir,
                                    os.path.join(val_dir, 'transforms.json'),
                                    smpl_estimator, transform, args)
    # ----- data loaders (re-created with batch_size=1 for image_wise below) -----
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.batchsize,
                                               shuffle=True,
                                               num_workers=0)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=args.batchsize_val,
                                             shuffle=False,
                                             num_workers=0)
    # Positional encoders for ray positions and view directions.
    position_encoder = PositionalEncoder(args.number_frequencies_postitional,
                                         args.use_identity_positional)
    direction_encoder = PositionalEncoder(args.number_frequencies_directional,
                                          args.use_identity_directional)
    # Default coarse/fine render networks; some branches below rebuild them
    # with extra inputs (pose or vertex features).
    model_coarse = RenderRayNet(args.netdepth,
                                args.netwidth,
                                position_encoder.output_dim * 3,
                                direction_encoder.output_dim * 3,
                                skips=args.skips)
    model_fine = RenderRayNet(args.netdepth_fine,
                              args.netwidth_fine,
                              position_encoder.output_dim * 3,
                              direction_encoder.output_dim * 3,
                              skips=args.skips_fine)

    # ----- solver dispatch: one branch per model type -----
    if args.model_type == "smpl_nerf":
        human_pose_encoder = PositionalEncoder(args.number_frequencies_pose,
                                               args.use_identity_pose)
        # Dimensions collapse to 1 when pose encoding is disabled.
        positions_dim = position_encoder.output_dim if args.human_pose_encoding else 1
        human_pose_dim = human_pose_encoder.output_dim if args.human_pose_encoding else 1
        model_warp_field = WarpFieldNet(args.netdepth_warp, args.netwidth_warp,
                                        positions_dim * 3, human_pose_dim * 2)

        solver = SmplNerfSolver(model_coarse, model_fine, model_warp_field,
                                position_encoder, direction_encoder,
                                human_pose_encoder, train_data.canonical_smpl,
                                args, torch.optim.Adam, torch.nn.MSELoss())
        solver.train(train_loader, val_loader, train_data.h, train_data.w)

        save_run(solver.writer.log_dir,
                 [model_coarse, model_fine, model_warp_field],
                 ['model_coarse.pt', 'model_fine.pt', 'model_warp_field.pt'],
                 parser)

    elif args.model_type == 'smpl':
        solver = SmplSolver(model_coarse, model_fine, position_encoder,
                            direction_encoder, args, torch.optim.Adam,
                            torch.nn.MSELoss())
        solver.train(train_loader, val_loader, train_data.h, train_data.w,
                     parser)
        save_run(solver.writer.log_dir, [model_coarse, model_fine],
                 ['model_coarse.pt', 'model_fine.pt'], parser)

    elif args.model_type == 'nerf' or args.model_type == "original_nerf":
        solver = NerfSolver(model_coarse, model_fine, position_encoder,
                            direction_encoder, args, torch.optim.Adam,
                            torch.nn.MSELoss())
        solver.train(train_loader, val_loader, train_data.h, train_data.w,
                     parser)
        save_run(solver.writer.log_dir, [model_coarse, model_fine],
                 ['model_coarse.pt', 'model_fine.pt'], parser)

    elif args.model_type == 'warp':
        human_pose_encoder = PositionalEncoder(args.number_frequencies_pose,
                                               args.use_identity_pose)
        positions_dim = position_encoder.output_dim if args.human_pose_encoding else 1
        human_pose_dim = human_pose_encoder.output_dim if args.human_pose_encoding else 1
        model_warp_field = WarpFieldNet(args.netdepth_warp, args.netwidth_warp,
                                        positions_dim * 3, human_pose_dim * 2)
        # NOTE(review): human_pose_encoder is constructed a second time here
        # with the same arguments; looks redundant -- confirm intent.
        human_pose_encoder = PositionalEncoder(args.number_frequencies_pose,
                                               args.use_identity_pose)
        solver = WarpSolver(model_warp_field, position_encoder,
                            direction_encoder, human_pose_encoder, args)
        solver.train(train_loader, val_loader, train_data.h, train_data.w)
        save_run(solver.writer.log_dir, [model_warp_field],
                 ['model_warp_field.pt'], parser)
    elif args.model_type == 'append_smpl_params':
        human_pose_encoder = PositionalEncoder(args.number_frequencies_pose,
                                               args.use_identity_pose)
        human_pose_dim = human_pose_encoder.output_dim if args.human_pose_encoding else 1

        # Rebuild the networks with an extra pose input.
        # NOTE(review): 69 presumably is the SMPL body-pose dimension --
        # confirm against the SMPL model used.
        model_coarse = RenderRayNet(
            args.netdepth,
            args.netwidth,
            position_encoder.output_dim * 3,
            direction_encoder.output_dim * 3,
            human_pose_dim * 69,
            skips=args.skips,
            use_directional_input=args.use_directional_input)
        model_fine = RenderRayNet(
            args.netdepth_fine,
            args.netwidth_fine,
            position_encoder.output_dim * 3,
            direction_encoder.output_dim * 3,
            human_pose_dim * 69,
            skips=args.skips_fine,
            use_directional_input=args.use_directional_input)

        # Optionally warm-start from a previous run.
        if args.load_run is not None:
            model_coarse.load_state_dict(
                torch.load(os.path.join(args.load_run, 'model_coarse.pt'),
                           map_location=torch.device(device)))
            model_fine.load_state_dict(
                torch.load(os.path.join(args.load_run, 'model_fine.pt'),
                           map_location=torch.device(device)))
            print("Models loaded from ", args.load_run)
        # NOTE(review): when --siren is set the (possibly loaded) weights
        # above are discarded and fresh SIREN networks are used instead.
        if args.siren:
            model_coarse = SirenRenderRayNet(
                args.netdepth,
                args.netwidth,
                position_encoder.output_dim * 3,
                direction_encoder.output_dim * 3,
                human_pose_dim * 69,
                skips=args.skips,
                use_directional_input=args.use_directional_input)
            model_fine = SirenRenderRayNet(
                args.netdepth_fine,
                args.netwidth_fine,
                position_encoder.output_dim * 3,
                direction_encoder.output_dim * 3,
                human_pose_dim * 69,
                skips=args.skips_fine,
                use_directional_input=args.use_directional_input)
        solver = AppendSmplParamsSolver(model_coarse, model_fine,
                                        position_encoder, direction_encoder,
                                        human_pose_encoder, args,
                                        torch.optim.Adam, torch.nn.MSELoss())
        solver.train(train_loader, val_loader, train_data.h, train_data.w,
                     parser)

        save_run(solver.writer.log_dir, [model_coarse, model_fine],
                 ['model_coarse.pt', 'model_fine.pt'], parser)

        # Render a turntable/preview GIF from the trained models.
        model_dependent = [human_pose_encoder, human_pose_dim]
        inference_gif(solver.writer.log_dir, args.model_type, args, train_data,
                      val_data, position_encoder, direction_encoder,
                      model_coarse, model_fine, model_dependent)
    elif args.model_type == 'append_to_nerf':
        human_pose_encoder = PositionalEncoder(args.number_frequencies_pose,
                                               args.use_identity_pose)
        human_pose_dim = human_pose_encoder.output_dim if args.human_pose_encoding else 1
        # Rebuild the networks with a 2-value pose input appended.
        model_coarse = RenderRayNet(
            args.netdepth,
            args.netwidth,
            position_encoder.output_dim * 3,
            direction_encoder.output_dim * 3,
            human_pose_dim * 2,
            skips=args.skips,
            use_directional_input=args.use_directional_input)
        model_fine = RenderRayNet(
            args.netdepth_fine,
            args.netwidth_fine,
            position_encoder.output_dim * 3,
            direction_encoder.output_dim * 3,
            human_pose_dim * 2,
            skips=args.skips_fine,
            use_directional_input=args.use_directional_input)
        solver = AppendToNerfSolver(model_coarse, model_fine, position_encoder,
                                    direction_encoder, human_pose_encoder,
                                    args, torch.optim.Adam, torch.nn.MSELoss())
        solver.train(train_loader, val_loader, train_data.h, train_data.w,
                     parser)

        save_run(solver.writer.log_dir, [model_coarse, model_fine],
                 ['model_coarse.pt', 'model_fine.pt'], parser)

        model_dependent = [human_pose_encoder, human_pose_dim]
        inference_gif(solver.writer.log_dir, args.model_type, args, train_data,
                      val_data, position_encoder, direction_encoder,
                      model_coarse, model_fine, model_dependent)
    elif args.model_type == 'append_vertex_locations_to_nerf':
        # 6890 is presumably the SMPL mesh vertex count -- confirm.
        model_coarse = AppendVerticesNet(args.netdepth,
                                         args.netwidth,
                                         position_encoder.output_dim * 3,
                                         direction_encoder.output_dim * 3,
                                         6890,
                                         additional_input_layers=1,
                                         skips=args.skips)
        model_fine = AppendVerticesNet(args.netdepth_fine,
                                       args.netwidth_fine,
                                       position_encoder.output_dim * 3,
                                       direction_encoder.output_dim * 3,
                                       6890,
                                       additional_input_layers=1,
                                       skips=args.skips_fine)
        smpl_estimator = DummySmplEstimatorModel(train_data.goal_poses,
                                                 train_data.betas)
        smpl_file_name = "SMPLs/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl"
        smpl_model = smplx.create(smpl_file_name, model_type='smpl')
        smpl_model.batchsize = args.batchsize
        solver = AppendVerticesSolver(model_coarse, model_fine, smpl_estimator,
                                      smpl_model, position_encoder,
                                      direction_encoder, args,
                                      torch.optim.Adam, torch.nn.MSELoss())
        solver.train(train_loader, val_loader, train_data.h, train_data.w)

        save_run(solver.writer.log_dir, [model_coarse, model_fine],
                 ['model_coarse.pt', 'model_fine.pt'], parser)

    elif args.model_type == 'vertex_sphere':
        solver = VertexSphereSolver(model_coarse, model_fine, position_encoder,
                                    direction_encoder, args, torch.optim.Adam,
                                    torch.nn.MSELoss())
        solver.train(train_loader, val_loader, train_data.h, train_data.w)
        save_run(solver.writer.log_dir, [model_coarse, model_fine],
                 ['model_coarse.pt', 'model_fine.pt'], parser)

    elif args.model_type == 'smpl_estimator':

        model = SmplEstimator(human_size=len(args.human_joints))

        solver = SmplEstimatorSolver(model, args, torch.optim.Adam,
                                     torch.nn.MSELoss())
        solver.train(train_loader, val_loader)
        save_run(solver.writer.log_dir, [model], ['model_smpl_estimator.pt'],
                 parser)
    elif args.model_type == "dummy_dynamic":
        smpl_file_name = "SMPLs/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl"
        smpl_model = smplx.create(smpl_file_name, model_type='smpl')
        smpl_model.batchsize = args.batchsize
        smpl_estimator = DummySmplEstimatorModel(train_data.goal_poses,
                                                 train_data.betas)
        solver = DynamicSolver(model_fine, model_coarse, smpl_estimator,
                               smpl_model, position_encoder, direction_encoder,
                               args)
        solver.train(train_loader, val_loader, train_data.h, train_data.w)
        save_run(solver.writer.log_dir,
                 [model_coarse, model_fine, smpl_estimator],
                 ['model_coarse.pt', 'model_fine.pt', 'smpl_estimator.pt'],
                 parser)
    elif args.model_type == "image_wise_dynamic":
        # Optionally load and freeze a pretrained coarse model.
        # NOTE(review): `!= None` should idiomatically be `is not None`.
        if args.load_coarse_model != None:
            print("Load model..")
            model_coarse.load_state_dict(
                torch.load(args.load_coarse_model,
                           map_location=torch.device(device)))
            for params in model_coarse.parameters():
                params.requires_grad = False
            model_coarse.eval()
        # Image-wise training processes one image per step, so the loaders
        # are rebuilt with batch_size=1.
        train_loader = torch.utils.data.DataLoader(train_data,
                                                   batch_size=1,
                                                   shuffle=True,
                                                   num_workers=0)
        val_loader = torch.utils.data.DataLoader(val_data,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=0)
        smpl_file_name = "SMPLs/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl"
        smpl_model = smplx.create(smpl_file_name, model_type='smpl')
        smpl_model.batchsize = args.batchsize
        # `smpl_estimator` was created in the matching dataset branch above.
        solver = ImageWiseSolver(model_coarse, model_fine, smpl_estimator,
                                 smpl_model, position_encoder,
                                 direction_encoder, args)
        solver.train(train_loader, val_loader, train_data.h, train_data.w)
        save_run(solver.writer.log_dir,
                 [model_coarse, model_fine, smpl_estimator],
                 ['model_coarse.pt', 'model_fine.pt', 'smpl_estimator.pt'],
                 parser)
# Example 6
# 0
from TS_analysis import TS_Analysis
from LSTM_model import LSTMModel
from SARIMAX_model import SARIMAXModel
import os
import sys
import yaml
import warnings
import logging


def main(args, config):
    """Run the forecasting pipeline: load data, optionally cluster, then
    fit and predict with the model selected by ``args.method``."""
    data, df = DataLoader(args).process_data()

    if args.cluster:
        clusterAnalysis(args, df)

    # Dispatch to the time-series model chosen on the command line; any
    # other value is silently ignored, matching the original behavior.
    model_classes = {
        'LSTM': LSTMModel,
        'SARIMAX': SARIMAXModel,
    }
    model_cls = model_classes.get(args.method)
    if model_cls is not None:
        model_cls(args, config).fit_predict(data)

    logging.info("Done")


if __name__ == "__main__":
    # Parse CLI arguments, then load the configuration they reference.
    args = arg_parser(sys.argv)
    config = config_parser(args)
    # Module-named logger; note that main() logs via the root `logging`
    # module rather than this logger.
    logger = logging.getLogger(__file__)
    main(args, config)
# Example 7
# 0
def inference():
    """Render a trained run on a ground-truth dataset and score the result.

    Extends the training argument parser with inference-specific options,
    runs the pipeline over the validation data, prints image-quality
    scores, saves the renders, and returns them as a uint8 image array.
    """
    # Reuse the training parser so all training options remain available,
    # and append inference-only arguments (prefixed inf_).
    parser_training = config_parser()
    parser_training.add_argument('--inf_run_dir',
                                 default="runs/Aug25_08-40-13_korhal",
                                 help='path to load model')
    parser_training.add_argument(
        '--inf_ground_truth_dir',
        default="data/sequence_1/val",
        help='path to load ground truth, created with create_dataset.py')
    parser_training.add_argument(
        '--inf_model_type',
        default="append_smpl_params",
        type=str,
        help=
        'choose dataset type for model [smpl_nerf, nerf, pix2pix, smpl, append_to_nerf]'
    )
    parser_training.add_argument(
        '--inf_save_dir',
        default="renders_test",
        help='save directory for inference output (appended to run_dir')
    parser_training.add_argument('--inf_batchsize',
                                 default=800,
                                 type=int,
                                 help='Batch size for inference')
    #config_file_training = os.path.join(args_training.inf_run_dir, "config.txt")
    #parser_training.add_argument('--config2', is_config_file=True,
    #                 default=config_file_training, help='config file path')
    args_training = parser_training.parse_args()
    print("Evaluate Run: ", args_training.inf_run_dir)
    print("On data: ", args_training.inf_ground_truth_dir)
    print("Experiment: ", args_training.experiment_name)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    pipeline, data_loader, dataset = setup_pipeline_dataloader(
        args_training, device)
    rgb_images_renders = []
    rgb_images_truth = []
    camera_transforms = dataset.image_transform_map
    # Render every batch; the last element of each batch is the RGB ground
    # truth, and out[1] is the fine network's render.
    for i, data in enumerate(tqdm(data_loader)):
        for j, element in enumerate(data):
            data[j] = element.to(device)
        rgb_truth = data[-1]
        out = pipeline(data)
        rgb_fine = out[1]
        rgb_images_renders.append(rgb_fine.detach().cpu())
        rgb_images_truth.append(rgb_truth.detach().cpu())
    # Reassemble the per-ray batches into (num_images, h, w, 3) images.
    rgb_images_renders = torch.cat(rgb_images_renders).reshape(
        (len(camera_transforms), dataset.h, dataset.w, 3))
    rgb_images_truth = torch.cat(rgb_images_truth).reshape(
        (len(camera_transforms), dataset.h, dataset.w, 3))
    # calculate scores
    print_scores(rgb_images_renders.permute(0, 3, 1, 2),
                 rgb_images_truth.permute(0, 3, 1, 2))
    # save renders
    rgb_images_renders = np.concatenate(rgb_images_renders.numpy(), 0).reshape(
        (len(camera_transforms), dataset.h, dataset.w, 3))
    rgb_images_renders = np.clip(rgb_images_renders, 0, 1) * 255
    rgb_images_renders = rgb_images_renders.astype(np.uint8)
    # Reverse the channel order (RGB -> BGR), presumably for an OpenCV-based
    # writer in save_rerenders -- confirm.
    rgb_images_renders = rgb_images_renders[..., ::-1]
    save_rerenders(rgb_images_renders, args_training.inf_run_dir,
                   args_training.inf_save_dir)
    return rgb_images_renders
# Example 8
# 0
from config_parser import config_parser
from arping import arping, device
from dispatcher import dispatcher

# Alarm configuration, read from the working directory.
config_file = "pipaalarm.ini"
config = config_parser(config_file)

# NOTE(review): these assignments shadow the imported `arping` and
# `dispatcher` callables; the names are kept unchanged because other code
# may import them from this module.
arping = arping(ip_range=config.getScanRange())
# Register every configured client as a monitored device, matched by MAC
# address (name left blank).
arping.monitored_devices |= {
    device("", d[1]["mac"]) for d in config.getClients()
}
dispatcher = dispatcher(config, arping.warnings)