def _decompose_movie(img_vid, caffemodel_abs, results_dir, args):
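    """Decompose a movie into reflectance and shading videos with the trained network."""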
    images, stats = load_movie(img_vid, args)
    num_images = images.shape[0]
    orig_filename = os.path.basename(img_vid)[:-4]
    full_path = os.path.join(results_dir,
                             'decompositions_sRGB',
                             orig_filename + '.mp4')

    # generate baseline reflectance video
    save_movie_baseline(full_path, images, stats)

    # set network size to movie size
    args.height = stats[1]
    args.width = stats[0]
    net = create_network(args)
    net.load_blobs_from(caffemodel_abs)

    print("Decompose movie")
    start_predicting = timeit.default_timer()
    results = net.predict(images,
                          post_batch_callbacks=[ProgressIndicator()],
                          out_blob_names=['reflectance', 'shading'])
    stop_predicting = timeit.default_timer()

    prediction_time = stop_predicting-start_predicting
    print("Predicting", num_images, "frames took",
          prediction_time, "seconds, i.e., ",
          prediction_time/num_images, "per frame and",
          num_images/prediction_time, "fps.")

    reflectances = results['reflectance']
    shadings = results['shading']
    save_movie_combined(full_path, images, reflectances, shadings, stats)
    save_movie_separate(full_path, images, reflectances, shadings, stats)
def main():
    """
    This method will initialize the scale test deployment based on the global \
    config parameters mentioned on config.py file
    """

    print "\n\n"
    print_scale_test_config()
    print "\n\n"
    print "Starting Scale Test Deployment"
    for i in range(1, NETWORK_COUNT + 1):
        network_name = NETWORK_NAME_PREFFIX + "_" + str(i)
        network_cidr = str(i) + "." + str(i) + "." + str(i) + ".0/24"
        create_network(network_name, network_cidr)
    print "=" * 50
    print "\n\n"
    print "Scale Test Deployment Completed"
    print "\n\n"
def _decompose_single_image_in_full_size(img_vid, caffemodel_abs,
                                         results_dir, args):
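    """Decompose a single image at its full resolution into reflectance and shading."""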
    img = _read_img(img_vid)

    # set network size to image size
    args.height, args.width = img.shape[1:]
    net = create_network(args)
    net.load_blobs_from(caffemodel_abs)

    # get decomposition of blob
    reflectance, shading, RS_est = _decompose_3d_blob(img, net)

    # output name is input name (in another folder)
    orig_filename = os.path.basename(img_vid)[:-4]
    # in the case they all have the same name (e.g. in MIT intrinsic), prepend
    # the directory name:
    # dirname = os.path.basename(os.path.dirname(img_vid))
    # orig_filename = dirname + '_' + os.path.basename(img_vid)[:-4]

    # png does not actually look much better than jpg here,
    # but the files are much larger (lossless compression).
    # IIW decompositions are all given as png.

    img_format = '.png'
    # img_format = '.jpg'

    # decompose without scaling in linear
    full_path = os.path.join(results_dir,
                             'decompositions_linear',
                             orig_filename + '-r' + img_format)
    _save_img(full_path, reflectance, scale2Max=False, convert2sRGB=False)
    full_path = os.path.join(results_dir,
                             'decompositions_linear',
                             orig_filename + '-s' + img_format)
    _save_img(full_path, shading, scale2Max=False, convert2sRGB=False)
    full_path = os.path.join(results_dir,
                             'decompositions_linear',
                             orig_filename + '-RS_est' + img_format)
    _save_img(full_path, RS_est, scale2Max=False, convert2sRGB=False)

    # and in sRGB
    full_path = os.path.join(results_dir,
                             'decompositions_sRGB',
                             orig_filename + '-r' + img_format)
    _save_img(full_path, reflectance, scale2Max=False, convert2sRGB=True)
    full_path = os.path.join(results_dir,
                             'decompositions_sRGB',
                             orig_filename + '-s' + img_format)
    _save_img(full_path, shading, scale2Max=False, convert2sRGB=True)
    full_path = os.path.join(results_dir,
                             'decompositions_sRGB',
                             orig_filename + '-RS_est' + img_format)
    _save_img(full_path, RS_est, scale2Max=False, convert2sRGB=True)
    def __init__(self,
                 memory: BaseMemory,
                 img_size: Tuple,
                 nov_thresh: float = 0.25,
                 novelty_loss_type: str = 'MSE',
                 train_epochs_per_iter: int = 1,
                 learning_rate: float = 0.001):
        """Initializes the Brain by creating CNN and AE
        
        Args:
            memory: BaseMemory
                A memory object that implements BaseMemory (such as PriorityBasedMemory)
            img_size: Tuple
                The image size of each grain from the agent's field of view
            nov_thresh: float
                (Currently deprecated). The novelty cutoff used in training
            novelty_loss_type: str
                A string indicating which novelty function to use (MSE or MAE)
            train_epochs_per_iter: int
                Number of epochs to train for in a single training session
            learning_rate: float
                Learning rate for neural network optimizer
        """

        assert train_epochs_per_iter > 0

        self._memory = memory
        self._img_size = img_size
        self._train_epochs_per_iter = train_epochs_per_iter
        self._nov_thresh = nov_thresh
        self._batch_size = 4
        self._novelty_loss_type = novelty_loss_type
        self._learning_rate = learning_rate

        self._loss_functions = {
            "mae": tf.keras.losses.MeanAbsoluteError(),
            "mse": tf.keras.losses.MeanSquaredError(),
        }

        if novelty_loss_type.lower() not in self._loss_functions:
            print("Novelty loss type not recognized. Exiting.")
            exit(1)

        self.novelty_function = self._loss_functions[novelty_loss_type.lower()]

        # Create network and optimizer
        self._network = networks.create_network(img_size)
        self._optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
def _decompose_numpy(img_vid, caffemodel_abs, results_dir, args):
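    """Decompose a batch of images stored in an .npz file and save the result next to it."""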
    # read input numpy file. Expect input in 'images' to have shape:
    # (num_images, height, width, channels)
    with np.load(img_vid) as npzFile:
        images = npzFile['images']
    # print(images.shape)

    # convert to blob for caffe (assume once linear, and once sRGB)
    input_as_is = np.transpose(images/255, (0, 3, 1, 2))
    # print(input_as_is.shape)

    # set network size to image size
    args.height, args.width = input_as_is.shape[2:]
    net = create_network(args)
    net.load_blobs_from(caffemodel_abs)

    # get decomposition of blob (once assuming linear input, once sRGB input)
    # unchanged (when the input is assumed to be linear, the output stays linear)
    R_from_input, S_from_input, r_from_input = _decompose_4d_blob(input_as_is,
                                                                  net)
    R_from_input = np.transpose(R_from_input, (0, 2, 3, 1))
    S_from_input = np.transpose(S_from_input, (0, 2, 3, 1))
    r_from_input = np.transpose(r_from_input, (0, 2, 3, 1))
    # when converting input from sRGB, undo with R and S
    input_converted_to_linear = srgb_to_rgb(input_as_is)
    R, S, r = _decompose_4d_blob(input_converted_to_linear, net)
    R_back_to_sRGB = np.transpose(rgb_to_srgb(R), (0, 2, 3, 1))
    S_back_to_sRGB = np.transpose(rgb_to_srgb(S), (0, 2, 3, 1))
    r_back_to_sRGB = np.transpose(rgb_to_srgb(r), (0, 2, 3, 1))

    # save as numpy file back to where original filename was
    np.savez_compressed(img_vid[:-4] + '_decomposed.npz',
                        images=images,
                        R_back_to_sRGB=R_back_to_sRGB,
                        S_back_to_sRGB=S_back_to_sRGB,
                        r_back_to_sRGB=r_back_to_sRGB,
                        R_from_input=R_from_input,
                        S_from_input=S_from_input,
                        r_from_input=r_from_input,
                        )
def main():
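    """Set up the environment, network, policy, replay memory and logger, then train the agent."""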
    args = TrainOptions().parse()
    device = torch.device('cuda') if (not args.no_cuda and torch.cuda.is_available()) else torch.device('cpu')
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    env = create_env(args)

    network = create_network(args, env.action_space.n, env.observation_space.shape)
    network.to(device)
    optimizer = Adam(network.parameters(), lr=args.lr)

    policy = AnnealedEpsilonGreedyPolicy(epsilon_max=args.epsilon_max,
                                         epsilon_min=args.epsilon_min, exploration_steps=args.exp_steps)
    memory = SimpleExperienceReplay(max_size=args.mem_max, batch_size=args.batch_size)
    logger = Logger()

    agent = create_agent(args, env, network, policy, memory, optimizer, logger)

    # train agent
    agent.learn(n_episodes=args.n_ep, ep_max_step=args.ep_max_step, replay_start_size=args.replay_start,
                save_every=args.freq_save_model, update_target_every=args.freq_target_update, render_every=args.freq_render)
def fit_predict_net(args, results_dir):
    """Train or test the network."""
    caffe.set_mode_gpu()  # run on GPU
    # collection_name = 'cluster'  # not used in code release

    net_params, description = get_description(args)

    snapshot_dir = os.path.join(results_dir, 'snapshots')
    draw_net_filename = os.path.join(results_dir, 'networks',
                                     net_params + '.png')

    additional_info = '_{}_{}_{}'.format(args.height, args.width,
                                         args.sRGB_linear)

    def getData(description):
        """Wrapper around the external getData using the given args."""
        return _getData(args.dataset,
                        description + additional_info,
                        args.comparisonsType)

    # progress indicator
    # progress = WHDRProgressIndicator(1, 1, 1)
    # progress = WHDRProgressIndicator(50, 100, 0.01)
    # the scales are just for human display, not for loss computation!
    progress = WHDRProgressIndicator(args.loss_scale_whdr,
                                     args.loss_scale_boundaries01,
                                     args.loss_scale_lambert)

    flags_fit = ['fit', 'f', 'train']
    flags_predict = ['predict', 'p', 'test', 'val']

    if args.stage in (flags_fit + flags_predict):
        print("Descriptive string:", description)
        # create the network of type networkType

        # print("Create network.")
        net = create_network(args, draw_net_filename)
        # print("Network created.")

        # if len(args.iterations) > 1:
        #     print("Use only one number of iterations for the maximum number"
        #           "of iterations to train. Will use the one provided last")
        # iterations = args.iterations[-1]

        iterations = args.iterations
        if args.iterations is None:
            if args.stage in flags_fit:
                raise Exception("Number of iterations was not set!")
            else:
                iterations = 1  # dummy

        # get data
        if not args.test:
            if args.stage in flags_fit:
                # no test mode, stage fit
                X = getData('trainValTest_train')
                # print("CHANGE AGAIN, READING DUMMY DATA!!!!!!!!!!!!!!!!!!!!")
                # X = getData('dummy_train')
                # print("CHANGE AGAIN, READING DUMMY DATA!!!!!!!!!!!!!!!!!!!!")
            # no test mode, stage predict and fit
            X_val = getData('trainValTest_val')
        else:  # --test=1
            if args.stage in flags_fit:
                # test mode, stage fit

                # train = trainValTest_train + trainValTest_val
                # X = getData('train')

                # instead use a most of the previous validation set for
                # training and leave only small validation
                X = getData('bigTrainMiniValTest_train')
                # as described above, bigger train set, small validation
                X_val = getData('bigTrainMiniValTest_val')
            elif args.stage in flags_predict:
                # test mode, stage predict
                # test data is only used when in --test=1 --stage=predict
                X_val = getData('trainValTest_test')


        # Define result extractor
        log_results = ['whdr_original',
                       'loss_whdr_hinge',
                       'whdr_original_level0',
                       'loss_whdr_hinge_level0',
                       'loss_boundaries01',
                       'loss_boundaries_reflectance',
                       'loss_boundaries_shading',
                       'loss_lambert']
        result_extractors = []
        for log_blob in log_results:
            result_extractors.append(ResultExtractor(log_blob, log_blob))
        combineLosses = CombineLosses(args.loss_scale_whdr,
                                      args.loss_scale_lambert)
        result_extractors.append(combineLosses)
        log_results.append('loss_combined')  # to tell the JSONLogger

        # Snapshots
        # snapshots_prefix = os.path.join(snapshot_dir, description + '_')
        snapshots_prefix = os.path.join(snapshot_dir, description)
        # print("snapshots prefix:", snapshots_prefix)
        checkpoint_interval = min(args.checkpoint_interval, iterations)
        print("Checkpointing every", checkpoint_interval, "iterations.")

        # Run the training.
        solver = _get_solver(args, snapshots_prefix)

        # print('name_prefix', snapshots_prefix,
        #       'iterations', checkpoint_interval)
        checkptr = CheckpointerIncludingRename(name_prefix=snapshots_prefix,
                                               iterations=checkpoint_interval)
        # json logs
        # filename will be prefixed with barrista_ and .json will be appended
        # json_log = JSONLogger(os.path.join(results_dir, 'logs'),
        #                       description + '_' + str(iterations),
        #                       {'train': logging})

        train_cb = list(result_extractors)
        # train_cb.append(json_log)  # for now do not use the json logger
        train_cb.append(checkptr)
        # test_cb = list(result_extractors)
        # test_cb.extend([json_log])

        train_cb.append(progress)
        # test_cb.append(progress)

        # print("Log the following blobs:")
        # for re in result_extractors:
        #     print('key:', re._cbparam_key, 'and layer_name:', re._layer_name)

        if args.stage in flags_fit:
            running_average = RunningAverage(X['images'].shape[0],
                                             args.batch_size)
            train_cb.append(running_average)

            start_train = timeit.default_timer()
            # print("Testing every", args.test_interval, "iterations.")
            print("Starting the training for", iterations, "iterations.")
            # flush the stdout (write content to file in cluster) for debugging
            sys.stdout.flush()

            if args.startOver:
                if args.predictCaffemodel:
                    print("Load initial weights from:", args.predictCaffemodel)
                    net.load_blobs_from(args.predictCaffemodel)

                net.fit(iterations,  # number of iterations
                        solver,
                        X,
                        # test_interval=args.test_interval,
                        # X_val=X_val,
                        train_callbacks=train_cb,
                        # test_callbacks=test_cb,
                        allow_test_phase_for_train=True,
                        )
            end_train = timeit.default_timer()
            training_time = end_train-start_train
            print("Total training time on node", platform.node(),
                  "is", training_time)

            # in the end evaluate the final model
            curr_iter = iterations
            cm = '_barrista_iter_{}.caffemodel'.format(curr_iter)
            caffemodel = description + cm
            print("Now predict data from val and evaluate the WHDR on it.")
            score = _predictCaffemodel(X_val, net, caffemodel, results_dir,
                                       args)

            args.score = score
            args.datetime = datetime.datetime.now()
            args.training_time = training_time
            # write everything into database
            # write_to_database(collection_name, args)

            # and evaluate all intermediate models
            print("Test all intermediate caffemodels.")
            json_val = []
            json_train = []
            scores = []
            for i in range(checkpoint_interval,  # start from first trained
                           iterations+1,
                           checkpoint_interval):
                curr_iter = i
                cm = '_barrista_iter_{}.caffemodel'.format(curr_iter)
                caffemodel = description + cm
                # save progression of val
                val_score = _predictCaffemodel(X_val,
                                               net,
                                               caffemodel,
                                               results_dir,
                                               args)
                json_val.append({"NumIters": curr_iter,
                                 "WHDR": val_score
                                 })
                # # if you also want to see the progression of train
                # # (takes longer!)
                # train_score = _predictCaffemodel(X,
                #                                  net,
                #                                  caffemodel,
                #                                  results_dir,
                #                                  args)
                # json_train.append({"NumIters": curr_iter,
                #                    "WHDR": train_score
                #                    })

                # # also insert into database
                # args.iterations = curr_iter
                # args.score = val_score
                # args.datetime = datetime.datetime.now()
                # write_to_database(collection_name, args)

                scores.append(val_score)

                print("Ran iteration", i, "of", iterations,
                      "with validation score", val_score)
                sys.stdout.flush()

            filename = os.path.join(results_dir, 'progressions',
                                    "barrista_" + description + ".json")
            with open(filename, 'w') as outfile:
                json.dump({"test": json_val, "train": json_train}, outfile)

            print("Final score in % (the best one):")
            # print(score)
            print(min(scores))

        if args.predictCaffemodel and args.stage in flags_predict:
            # X_val = getData('dummy_val')  # should already be loaded

            # parse parameters for network from filename
            caffemodel_abs = args.predictCaffemodel
            caffemodel_rel = os.path.basename(args.predictCaffemodel)
            desc_split = caffemodel_rel.split('_')
            # print("split description:", desc_split)
            args.networkType = desc_split[0]
            args.numLayers = int(desc_split[1][1:])
            args.num_filters_log = int(np.log2(int(desc_split[2][1:])))
            args.kernel_pad = int((int(desc_split[3][1:]) - 1) / 2)
            args.RS_est_mode = desc_split[6]
            args.whdr_delta_margin_ratio_dense = (desc_split[7][3:] + '_' +
                                                  desc_split[8] + '_' +
                                                  desc_split[9] + '_' +
                                                  desc_split[10])
            args.iterations = int(desc_split[-1][:-11])
            print("Inferred parameters:",
                  "\nnetworkType:", args.networkType,
                  "\nnumLayers:", args.numLayers,
                  "\nkernel_pad:", args.kernel_pad,
                  "\nnum_filters_log", args.num_filters_log,
                  "\nRS_est_mode:", args.RS_est_mode,
                  "\niterations:", args.iterations,
                  "\nwhdr_delta_mar_r_d:", args.whdr_delta_margin_ratio_dense,
                  )

            scores = []

            if args.decompose:
                print("Decompose input")

                files_to_decompose = []
                for entry in args.decompose:
                    if os.path.isfile(entry):
                        files_to_decompose.append(entry)
                    elif os.path.isdir(entry):
                        # files_to_decompose.extend(os.listdir(entry))
                        for f in os.listdir(entry):
                            files_to_decompose.append(os.path.join(entry, f))
                    else:
                        print(entry, "is neither a file nor folder")

                for i, img_vid in enumerate(tqdm(files_to_decompose)):
                    try:
                        if is_image(img_vid):
                            # old version, can be removed (does resizing to
                            # 256 x 256 and then upscales again)
                            # _decompose_images(img_vid, caffemodel_abs,
                            #                   results_dir, args)
                            # new version (does decomp.in full resolution)
                            _decompose_single_image_in_full_size(img_vid,
                                                                 caffemodel_abs,
                                                                 results_dir,
                                                                 args)
                        elif is_movie(img_vid):
                            _decompose_movie(img_vid, caffemodel_abs,
                                             results_dir, args)
                        elif is_numpy(img_vid):
                            _decompose_numpy(img_vid, caffemodel_abs,
                                             results_dir, args)
                        else:
                            print("\nFile", img_vid,
                                  "neither recognized as image, nor movie")
                    except:
                        print("Decomposing file",
                              img_vid,
                              "was not possible")
                        traceback.print_exc()
                return
    else:
        msg = "stage '{}' is currently not implemented!".format(args.stage)
        raise Exception(msg)
def main():
    """
    This method will initialize the scale test deployment based on the global \
    config parameters mentioned on config.py file and creates the router with \
    external gateway connectivity to public network.
    """

    tenant_data = []
    router_data = []
    test_data = []
    test_count = {}
    if TENANT_CREATION:
        print "\n"
        print_scale_test_config()
        print "\n"
        print "Starting Scale Test Deployment"
        index = TENANT_BASE_INDEX
        for i in range(1, TENANT_COUNT + 1):
            tenant_name = TENANT_NAME_PREFIX + '-' + str(index)
            index += 1
            try:
                tenant_data.append(create_tenant(tenant_name))
            except Exception as exc:
                print "Exception occurred on Tenant Creation: %s" % (exc.message)
        test_count["tenant_count"] = len(tenant_data)
        for tenant in tenant_data:
            
            #test= {'count': {'tenant_count': len(tenant_data)}}
            #test_count.append(test)
            router_dict = {}
            prefix = tenant['tenant_name']
            router_name = prefix + '-router'
            router_dict['tenant_name'] = tenant['tenant_name']
            router_dict['router_name'] = router_name
            router_dict['network_vlan'] = {}
            router_dict['router_detail'] = {}
            router_dict['ipnatpool_data'] = {}
            router_dict['iproute_data'] = {}
            router_dict['interface_data'] = []
            router_dict['nat_data'] = []

            router = neutron.list_routers(name=router_name)['routers']
            if not router:
                router_info = neutron.create_router({'router': {
                    'name': router_name, 'tenant_id': tenant['tenant_id']}})
                router = router_info['router']
                status = True
            elif router[0]['tenant_id'] == tenant['tenant_id']:
                router = router[0]
                status = True
            else:
                router = {}
                status = False

        router_dict['router_status'] = status
        networks = neutron.list_networks(name=EXTERNAL_NETWORK)
        network_id = networks['networks'][0]['id']
        neutron.add_gateway_router(router['id'],
                                   {'network_id': network_id,
                                    'tenant_id': tenant['tenant_id']})
        router_id = router['id']
        print('   - Created Router %s' % router['name'])

        for i in range(1, NETWORK_COUNT + 1):
            network_index = str(i)
            network_cidr = str(i) + "." + str(i) + "." + str(i) + ".0/24"
            test_data.append(create_network(tenant, router,
                             network_index, network_cidr))
        network_vlan = {}
        for entry in test_data:
            network_vlan[str(entry['network_data']['network_vlan_id'])] = \
                entry['network_data']['network_name']
        router_dict['network_vlan'] = network_vlan

    else:
        print "\n"
        print "=" * 50
        print "Discovering Tenant Topology on Scale Test Deployment"
        print "=" * 50
        try:
            for i in range(len(TENANT_NAME)):
                tenant_name = TENANT_NAME[i]
                tenant_data.append(discover_tenant(tenant_name))
                #test= {'count': {'tenant_count': len(tenant_data)}}
                #test_count.append(test)
                
                for tenant in tenant_data:
                    router_dict = {}
                    prefix = tenant['tenant_name']
                    router_name = prefix + '-router'
                    router_dict['tenant_name'] = tenant['tenant_name']
                    router_dict['tenant_id'] = tenant['tenant_id']
                    router_dict['router_name'] = router_name
                    router_dict['network_vlan'] = {}
                    router_dict['router_detail'] = {}
                    router_dict['ipnatpool_data'] = {}
                    router_dict['iproute_data'] = {}
                    router_dict['interface_data'] = []
                    router_dict['nat_data'] = []
                router = neutron.list_routers(name=router_name)['routers'][0]
                if router['tenant_id'] == tenant['tenant_id']:
                    print('   - Router %s Discovered' % router_name)
                    status = True
                else:
                    print('   - Router %s Not Found' % router_name)
                    status = False
                router_id = router['id']
                router_dict['router_status'] = status
                router_data.append(router_dict)
                #test1= {'count': {'router_count': len(router_data)}}
                #test_count.append(test1)  
                test_count["router_count"]=len(router_data)              
                for i in range(1, NETWORK_COUNT + 1):
                    network_index = str(i)
                    network_cidr = str(i) + "." + str(i) + "." + str(i) + ".0/24"
                    network_name = prefix + '-net-' + network_index
                    subnet_name = prefix + "-subnet-" + network_index
                    ins_data = [] 
                    networks = neutron.list_networks(name=network_name)['networks']
                    for i in range(len(networks)):
                        if networks[i]['tenant_id'] == tenant['tenant_id']:
                            network_id = networks[i]['id']
                            network_vlan = networks[i]['provider:segmentation_id']
                            print('   - Network %s Discovered' % network_name)
                            print('   - Network ID %s Discovered' % network_id)
                            print('   - VLAN ID %s Discovered' % network_vlan)
                            status = True
                            for j in range(1, VM_COUNT + 1):
                                vm_name = network_name + '-vm-' + str(j)
                                ins_data.append(discover_vm_on_network(tenant['tenant_name'], vm_name, network_id))
                        else:
                            print('   - Network %s Not Found' % network_name)
                            status = False
 
                    result = {'network_data': {'tenant_name': tenant['tenant_name'],
                                               'network_name': network_name,
                                               'network_cidr': network_cidr,
                                               'subnet_name': subnet_name,
                                               'network_id': network_id,
                                               'network_vlan_id': network_vlan,
                                               'status':status},
                              'instance_data': ins_data}
                    test_data.append(result)
                    test_count["network_count"]=test_data
                
        except Exception:
            print "\n"
            print('   - Tenant %s Not Found' % tenant_name)


    print "\n"
    print "=" * 50
    print "Scale Test Discovery Completed"
    print "=" * 50
    
    print "*" * 80
    print "Scale Test Deployment OpenStack Report"
    print "*" * 80

    print "\n"
    print "           Tenant Discovery Results      "
    print print_tenant_info(tenant_data)
    print "\n"
    print "           Router Discovery Results      "
    print print_router_info(router_data)
    print "\n"
    print "                 Network Discovery Results      "
    print print_network_info(test_data)
    print "\n"
    
    print "            Instance Discovery Results      "
    print print_instance_info(test_data)
    print "\n"
    
    print "             Tenant Name, Router Name, Network Name       "
    print print_discovered_tenant(tenant_data,router_data,test_data)
    print "\n"

    print"              Overall Count of resources"     
    print print_consolidated_count(test_count)  
# Create a Denoising AE for each time step
compression_layer = []
for i in range(num_steps):
    # alternate GPU assignment
    gpu_id = 0 if i % 2 == 0 else 1
    with tf.device('/gpu:{}'.format(gpu_id)):
        network_params = {
            'keep_prob': keep_prob,
            'reg_param': reg_param,
            'noise_param': noise_param,
            'sizes': [num_input, num_units, num_input],
            'activations': [tf.nn.sigmoid, tf.identity]
        }

        network = create_network('DenoisingAutoEncoder', X, X, network_params)

        prediction = network.create_prediction()
        cost = network.create_cost()
        optimizer = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(cost)

        denoisingAE = dict(prediction=prediction,
                           cost=cost,
                           name='in_layer_denoising_ae_{}'.format(i + 1),
                           optimizer=optimizer)

        compression_layer.append(denoisingAE)

with tf.device('/gpu:0'):
    network_params = {
# Create Networks
# network_params = {'keep_prob': keep_prob,
#                   'reg_param': reg_param,
#                   'sizes': [num_input * num_steps, 250, 4, 250,
#                             num_input * num_steps],
#                   'activations': [tf.nn.relu, tf.nn.sigmoid, tf.nn.relu,
#                                   tf.identity]}

network_params = {
    'num_units': num_units,
    'num_steps': num_steps,
    'num_out': num_out
}

model_name = 'LSTM RNN'
network = create_network('LSTM_RNN', X, Y, network_params)
prediction = network.create_prediction()
cost = network.create_cost()
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

init_op = tf.initialize_all_variables()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init_op) if args.train else saver.restore(sess, 'model.ckpt')

    if args.train:
        costs = []
        for epoch in range(training_epochs):
            avg_cost = 0
            for i in range(training_size):
def main():
    """
    This method will initialize the scale test deployment based on the global \
    config parameters mentioned on config.py file and creates the router with \
    external gateway connectivity to public network.
    """

    tenant_data = []
    router_data = []
    test_data = []
    test_count = {}
    if TENANT_CREATION:
        print "\n"
        print_scale_test_config()
        print "\n"
        print "Starting Scale Test Deployment"
        index = TENANT_BASE_INDEX
        for i in range(1, TENANT_COUNT + 1):
            tenant_name = TENANT_NAME_PREFIX + '-' + str(index)
            index += 1
            try:
                tenant_data.append(create_tenant(tenant_name))
            except Exception as exc:
                print "Exception occurred on Tenant Creation: %s" % (exc.message)
        test_count["tenant_count"] = len(tenant_data)
        for tenant in tenant_data:
            
            #test= {'count': {'tenant_count': len(tenant_data)}}
            #test_count.append(test)
            router_dict = {}
            prefix = tenant['tenant_name']
            router_name = prefix + '-router'
            router_dict['tenant_name'] = tenant['tenant_name']
            router_dict['router_name'] = router_name
            router_dict['network_vlan'] = {}
            router_dict['router_detail'] = {}
            router_dict['ipnatpool_data'] = {}
            router_dict['iproute_data'] = {}
            router_dict['interface_data'] = []
            router_dict['nat_data'] = []

            router = neutron.list_routers(name=router_name)['routers']
            if not router:
                router_info = neutron.create_router({'router': {
                    'name': router_name, 'tenant_id': tenant['tenant_id']}})
                router = router_info['router']
                status = True
            elif router[0]['tenant_id'] == tenant['tenant_id']:
                router = router[0]
                status = True
            else:
                router = {}
                status = False

        router_dict['router_status'] = status
        networks = neutron.list_networks(name=EXTERNAL_NETWORK)
        network_id = networks['networks'][0]['id']
        neutron.add_gateway_router(router['id'],
                                   {'network_id': network_id,
                                    'tenant_id': tenant['tenant_id']})
        router_id = router['id']
        print('   - Created Router %s' % router['name'])

        for i in range(1, NETWORK_COUNT + 1):
            network_index = str(i)
            network_cidr = str(i) + "." + str(i) + "." + str(i) + ".0/24"
            test_data.append(create_network(tenant, router,
                             network_index, network_cidr))
        network_vlan = {}
        for entry in test_data:
            network_vlan[str(entry['network_data']['network_vlan_id'])] = \
                entry['network_data']['network_name']
        router_dict['network_vlan'] = network_vlan

    else:
        print "\n"
        print "=" * 50
        print "Discovering Tenant Topology on Scale Test Deployment"
        print "=" * 50
        try:
            for i in range(len(TENANT_NAME)):
                tenant_name = TENANT_NAME[i]
                tenant_data.append(discover_tenant(tenant_name))
                #test= {'count': {'tenant_count': len(tenant_data)}}
                #test_count.append(test)
                test_count["tenant_count"]=len(tenant_data)
                for tenant in tenant_data:
                    router_dict = {}
                    prefix = tenant['tenant_name']
                    router_name = prefix + '-router'
                    router_dict['tenant_name'] = tenant['tenant_name']
                    router_dict['tenant_id'] = tenant['tenant_id']
                    router_dict['router_name'] = router_name
                    router_dict['network_vlan'] = {}
                    router_dict['router_detail'] = {}
                    router_dict['ipnatpool_data'] = {}
                    router_dict['iproute_data'] = {}
                    router_dict['interface_data'] = []
                    router_dict['nat_data'] = []
                router = neutron.list_routers(name=router_name)['routers'][0]
                if router['tenant_id'] == tenant['tenant_id']:
                    print('   - Router %s Discovered' % router_name)
                    status = True
                else:
                    print('   - Router %s Not Found' % router_name)
                    status = False
                router_id = router['id']
                router_dict['router_status'] = status
                router_data.append(router_dict)
                #test1= {'count': {'router_count': len(router_data)}}
                #test_count.append(test1)  
                test_count["router_count"]=len(router_data)              
                for i in range(1, NETWORK_COUNT + 1):
                    network_index = str(i)
                    network_cidr = str(i) + "." + str(i) + "." + str(i) + ".0/24"
                    network_name = prefix + '-net-' + network_index
                    subnet_name = prefix + "-subnet-" + network_index
                    ins_data = [] 
                    networks = neutron.list_networks(name=network_name)['networks']
                    for i in range(len(networks)):
                        if networks[i]['tenant_id'] == tenant['tenant_id']:
                            network_id = networks[i]['id']
                            network_vlan = networks[i]['provider:segmentation_id']
                            print('   - Network %s Discovered' % network_name)
                            print('   - Network ID %s Discovered' % network_id)
                            print('   - VLAN ID %s Discovered' % network_vlan)
                            status = True
                            for j in range(1, VM_COUNT + 1):
                                vm_name = network_name + '-vm-' + str(j)
                                ins_data.append(discover_vm_on_network(tenant['tenant_name'], vm_name, network_id))
                        else:
                            print('   - Network %s Not Found' % network_name)
                            status = False
 
                    result = {'network_data': {'tenant_name': tenant['tenant_name'],
                                               'network_name': network_name,
                                               'network_cidr': network_cidr,
                                               'subnet_name': subnet_name,
                                               'network_id': network_id,
                                               'network_vlan_id': network_vlan,
                                               'status':status},
                              'instance_data': ins_data}
                    test_data.append(result)
                    test_count["network_count"]=test_data
        except Exception:
            print "\n"
            print('   - Tenant %s Not Found' % tenant_name)

    if ENABLE_ASR_VERIFICATION:
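        # Verify the router's VRF, NAT pool, IP routes and per-interface NAT
        # entries on the ASR device for the last processed router.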
        vrf_router_id = router_id[:6]
        print vrf_router_id
        vrfname = "nrouter" + '-' + vrf_router_id + '-' + DEPLOYMENT_ID
        print vrfname

        asr_verify_cmd = GetASRCmd(asr_host=ASR_HOST,
                                   asr_host_port=22,
                                   asr_user=ASR_USER,
                                   asr_password=ASR_PASSWORD,
                                   asr_slots=["0"])
        router_detail = {'vrfname': vrfname, 'interfaces': '',
                         'status': ''}
        ipnatpool_data = {'vrfname': vrfname, 'nat_pool_name': '',
                          'start_ip': '', 'end_ip': '', 'netmask': '',
                          'status': ''}
        iproute_data = {'vrfname': vrfname, 'prefix': '', 'mask': '',
                        'interface': '', 'next_hop_address': '',
                        'status': ''}
        interface_data = []
        nat_data = []
        try:
            router_detail = asr_verify_cmd.get_router_detail(vrfname)
            ipnatpool_data = asr_verify_cmd.get_ipnat_pool_detail(vrfname)
            iproute_data = asr_verify_cmd.get_iproute_detail(vrfname)
            for interface in router_detail['interfaces']:
                interface_data.append(
                    asr_verify_cmd.get_network_interface_detail(vrfname,
                                                                interface))
            for interface in interface_data:
                interfaceid = DEPLOYMENT_ID + '_' + interface['vlan_id']
                nat_data.append(
                    asr_verify_cmd.get_interface_nat_access_detail(
                        interface['vlan_id'], interfaceid))
            asr_report = True
        except Exception as exc:
            print "\n"
            print "[ERROR] Caught exception on ASR Verification : %s" % \
                (exc.message)
            print "\n"
            asr_report = False
        router_dict['router_detail'] = router_detail
        router_dict['ipnatpool_data'] = ipnatpool_data
        router_dict['iproute_data'] = iproute_data
        router_dict['interface_data'] = interface_data
        router_dict['nat_data'] = nat_data
        router_data.append(router_dict)

    print "\n"
    print "=" * 50
    print "Scale Test Discovery Completed"
    print "=" * 50
    
    print "*" * 80
    print "Scale Test Deployment OpenStack Report"
    print "*" * 80

    print "\n"
    print "           Tenant Discovery Results      "
    print print_tenant_info(tenant_data)
    print "\n"
    print "           Router Discovery Results      "
    print print_router_info(router_data)
    print "\n"
    print "                 Network Discovery Results      "
    print print_network_info(test_data)
    print "\n"
    
    print "            Instance Discovery Results      "
    print print_instance_info(test_data)
    print "\n"
    
    print "             Tenant Name, Router Name, Network Name       "
    print print_discovered_tenant(tenant_data,router_data,test_data)
    print "\n"

    print"              Overall Count of resources"     
    print print_consolidated_count(test_count)  

    if ENABLE_ASR_VERIFICATION and asr_report:
        print "           OpenStack-ASR Router VRF Verification Results      "
        print asr_router_vrf_info(router_data)
        print "\n"

        print "           OpenStack-ASR IP NAT Pool Verification Results     "
        print asr_ipnat_pool_info(router_data)
        print "\n"
        print "           OpenStack-ASR IP Route Verification Results        "
        print asr_iproute_info(router_data)
        print "\n"

        print "           OpenStack-ASR Network VRF Verification Results     "
        print asr_network_vrf_info(router_data)
        print "\n"

        print("           OpenStack-ASR Network Interface's Dynamic NAT & "
              "Access list Entry Verification Results              ")
        print asr_interface_nat_info(router_data)
        print "\n"
cost_threshold = tf.Variable([0, 0], dtype=tf.float32)

# Create Networks
# network_params = {'keep_prob': keep_prob,
#                   'reg_param': reg_param,
#                   'sizes': [num_input * num_steps, 250, 4, 250,
#                             num_input * num_steps],
#                   'activations': [tf.nn.relu, tf.nn.sigmoid, tf.nn.relu,
#                                   tf.identity]}

network_params = {'num_units': num_units,
                  'num_steps': num_steps,
                  'num_out': num_out}

model_name = 'LSTM RNN'
network = create_network('LSTM_RNN', X, Y, network_params)
prediction = network.create_prediction()
cost = network.create_cost()
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

init_op = tf.initialize_all_variables()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init_op) if args.train else saver.restore(sess, 'model.ckpt')

    if args.train:
        costs = []
        for epoch in range(training_epochs):
            avg_cost = 0
            for i in range(training_size):
noise_param = tf.placeholder('float')
cost_threshold = tf.Variable([0, 0], dtype=tf.float32)

# Create Networks
model_name = 'Stacked Denoising AutoEncoder'

# Create a Denoising AE for each time step
compression_layer = []
for i in range(num_steps):
    network_params = {'keep_prob': keep_prob,
                      'reg_param': reg_param,
                      'noise_param': noise_param,
                      'sizes': [num_input, num_units, num_input],
                      'activations': [tf.nn.sigmoid, tf.identity]}

    network = create_network('DenoisingAutoEncoder', X, X, network_params)

    prediction = network.create_prediction()
    cost = network.create_cost()
    optimizer = tf.train.AdamOptimizer(
                learning_rate=learning_rate).minimize(cost)

    denoisingAE = dict(prediction=prediction,
                       cost=cost,
                       name='compression_layer_denoising_ae_{}'.format(i+1),
                       optimizer=optimizer)

    compression_layer.append(denoisingAE)


network_params={'keep_prob': keep_prob,
def main():
    """
    This method will initialize the scale test deployment based on the global \
    config parameters mentioned on config.py file and creates the router with \
    external gateway connectivity to public network.
    """
    main_start_time = datetime.now(timezone('US/Pacific'))
    print "\n"
    print_scale_test_config()
    print "\n"
    print "Starting Scale Test Deployment"

    tenant_data = []
    index = TENANT_BASE_INDEX
    start_time = datetime.now(timezone('US/Pacific'))
    for i in range(1, TENANT_COUNT + 1):
        tenant_name = TENANT_NAME_PREFIX + '-' + str(index)
        index += 1
        try:
            tenant_data.append(create_tenant(tenant_name))
        except Exception as exc:
            print "Exception accoured on Tenant Creation: %s" % (exc.message)
            pass

    end_time = datetime.now(timezone('US/Pacific'))
    print "-" * 65
    print("    Tenant & User Creation Time Summary :")
    print "-" * 65
    print "\n"
    print('   - Test Started Time   :\t %s' % (start_time.strftime(fmt)))
    print('   - Test Ended Time     :\t %s' % (end_time.strftime(fmt)))
    print "\n"

    router_data = []
    test_data = []
    for tenant in tenant_data:
        router_dict = {}
        prefix = tenant['tenant_name']
        router_name = prefix + '-router'
        router_dict['tenant_name'] = tenant['tenant_name']
        router_dict['router_name'] = router_name
        router_dict['network_vlan'] = {}
        router_dict['router_detail'] = {}
        router_dict['ipnatpool_data'] = {}
        router_dict['iproute_data'] = {}
        router_dict['interface_data'] = []
        router_dict['nat_data'] = []

        router = neutron.list_routers(name=router_name)['routers']
        if not router:
            start_time = datetime.now(timezone('US/Pacific'))
            router_info = neutron.create_router({
                'router': {
                    'name': router_name,
                    'tenant_id': tenant['tenant_id']
                }
            })
            router = router_info['router']
            status = True
        elif router[0]['tenant_id'] == tenant['tenant_id']:
            router = router[0]
            status = True
        else:
            router = {}
            status = False

        router_dict['router_status'] = status
        networks = neutron.list_networks(name=EXTERNAL_NETWORK)
        network_id = networks['networks'][0]['id']
        neutron.add_gateway_router(router['id'], {
            'network_id': network_id,
            'tenant_id': tenant['tenant_id']
        })
        router_id = router['id']
        print('   - Created Router %s' % router['name'])
        print "\n"
        end_time = datetime.now(timezone('US/Pacific'))
        print "\n"
        print "-" * 65
        print("    Router %s Creation Time Summary :" % router_name)
        print "-" * 65
        print "\n"
        print('   - Test Started Time   :\t %s' % (start_time.strftime(fmt)))
        print('   - Test Ended Time     :\t %s' % (end_time.strftime(fmt)))
        print "\n"

        for i in range(1, NETWORK_COUNT + 1):
            network_index = str(i)
            network_cidr = str(i) + "." + str(i) + "." + str(i) + ".0/24"
            test_data.append(
                create_network(tenant, router, network_index, network_cidr))

        network_vlan = {}
        for entry in test_data:
            network_vlan[str(entry['network_data']['network_vlan_id'])] = \
                entry['network_data']['network_name']
        router_dict['network_vlan'] = network_vlan

        if ENABLE_ASR_VERIFICATION:
            vrf_router_id = router_id[:6]
            vrfname = "nrouter" + '-' + vrf_router_id + '-' + DEPLOYMENT_ID
            start_time = datetime.now(timezone('US/Pacific'))
            asr_verify_cmd = GetASRCmd(asr_host=ASR_HOST,
                                       asr_host_port=22,
                                       asr_user=ASR_USER,
                                       asr_password=ASR_PASSWORD,
                                       asr_slots=["0"])
            router_detail = {
                'vrfname': vrfname,
                'interfaces': '',
                'status': ''
            }
            ipnatpool_data = {
                'vrfname': vrfname,
                'nat_pool_name': '',
                'start_ip': '',
                'end_ip': '',
                'netmask': '',
                'status': ''
            }
            iproute_data = {
                'vrfname': vrfname,
                'prefix': '',
                'mask': '',
                'interface': '',
                'next_hop_address': '',
                'status': ''
            }
            interface_data = []
            nat_data = []
            try:
                router_detail = asr_verify_cmd.get_router_detail(vrfname)
                ipnatpool_data = asr_verify_cmd.get_ipnat_pool_detail(vrfname)
                iproute_data = asr_verify_cmd.get_iproute_detail(vrfname)
                for interface in router_detail['interfaces']:
                    interface_data.append(
                        asr_verify_cmd.get_network_interface_detail(
                            vrfname, interface))
                for interface in interface_data:
                    interfaceid = DEPLOYMENT_ID + '_' + interface['vlan_id']
                    nat_data.append(
                        asr_verify_cmd.get_interface_nat_access_detail(
                            interface['vlan_id'], interfaceid))
                asr_report = True
                end_time = datetime.now(timezone('US/Pacific'))
                print "\n"
                print "-" * 65
                print("    ASR Functionality Verification Time Summary :")
                print "-" * 65
                print "\n"
                print('   - Test Started Time   :\t %s' %
                      (start_time.strftime(fmt)))
                print('   - Test Ended Time     :\t %s' %
                      (end_time.strftime(fmt)))
                print "\n"

            except Exception as exc:
                print "\n"
                print "[ERROR] Caught exception on ASR Verification : %s" % \
                    (exc.message)
                print "\n"
                asr_report = False
            router_dict['router_detail'] = router_detail
            router_dict['ipnatpool_data'] = ipnatpool_data
            router_dict['iproute_data'] = iproute_data
            router_dict['interface_data'] = interface_data
            router_dict['nat_data'] = nat_data
        router_data.append(router_dict)
    print "=" * 50
    print "\n"
    print "Scale Test Deployment Completed"
    print "\n"

    print "*" * 80
    print "Scale Test Deployment OpenStack Report"
    print "*" * 80

    print "\n"
    print "           Tenant Creation Results      "
    print print_tenant_info(tenant_data)
    print "\n"
    print "           Router Creation Results      "
    print print_router_info(router_data)
    print "\n"
    print "                 Network Creation Results      "
    print print_network_info(test_data)
    print "\n"

    print "            Instance Creation Results      "
    print print_instance_info(test_data)
    print "\n"

    if ENABLE_ASR_VERIFICATION and asr_report:
        print "           OpenStack-ASR Router VRF Verification Results      "
        print asr_router_vrf_info(router_data)
        print "\n"

        print "           OpenStack-ASR IP NAT Pool Verification Results     "
        print asr_ipnat_pool_info(router_data)
        print "\n"
        print "           OpenStack-ASR IP Route Verification Results        "
        print asr_iproute_info(router_data)
        print "\n"

        print "           OpenStack-ASR Network VRF Verification Results     "
        print asr_network_vrf_info(router_data)
        print "\n"

        print(
            "           OpenStack-ASR Network Interface's Dynamic NAT & "
            "Access list Entry Verification Results              ")
        print asr_interface_nat_info(router_data)
        print "\n"
def main():
    """
    This method will initialize the scale test deployment based on the global \
    config parameters mentioned on config.py file and creates the router with \
    external gateway connectivity to public network.
    """
    main_start_time = datetime.now(timezone('US/Pacific'))
    print "\n"
    print_scale_test_config()
    print "\n"
    print "Starting Scale Test Deployment"

    tenant_data = []
    index = TENANT_BASE_INDEX
    start_time = datetime.now(timezone('US/Pacific'))
    for i in range(1, TENANT_COUNT + 1):
        tenant_name = TENANT_NAME_PREFIX + '-' + str(index)
        index += 1
        try:
            tenant_data.append(create_tenant(tenant_name))
        except Exception as exc:
            print "Exception accoured on Tenant Creation: %s" % (exc.message)
            pass

    end_time = datetime.now(timezone('US/Pacific'))
    print "-"*65
    print ("    Tenant & User Creation Time Summary :")
    print "-"*65
    print "\n"
    print('   - Test Started Time   :\t %s' % (start_time.strftime(fmt)))
    print('   - Test Ended Time     :\t %s' % (end_time.strftime(fmt)))
    print "\n"

    router_data = []
    test_data = []
    for tenant in tenant_data:
        router_dict = {}
        prefix = tenant['tenant_name']
        router_name = prefix + '-router'
        router_dict['tenant_name'] = tenant['tenant_name']
        router_dict['router_name'] = router_name
        router_dict['network_vlan'] = {}
        router_dict['router_detail'] = {}
        router_dict['ipnatpool_data'] = {}
        router_dict['iproute_data'] = {}
        router_dict['interface_data'] = []
        router_dict['nat_data'] = []

        router = neutron.list_routers(name=router_name)['routers']
        if not router:
            start_time = datetime.now(timezone('US/Pacific'))
            router_info = neutron.create_router({'router': {
                'name': router_name, 'tenant_id': tenant['tenant_id']}})
            router = router_info['router']
            status = True
        elif router[0]['tenant_id'] == tenant['tenant_id']:
            router = router[0]
            status = True
        else:
            router = {}
            status = False

        router_dict['router_status'] = status
        networks = neutron.list_networks(name=EXTERNAL_NETWORK)
        network_id = networks['networks'][0]['id']
        neutron.add_gateway_router(router['id'],
                                   {'network_id': network_id,
                                    'tenant_id': tenant['tenant_id']})
        router_id = router['id']
        print('   - Created Router %s' % router['name'])
        print "\n"
        end_time = datetime.now(timezone('US/Pacific'))
        print "\n"
        print "-"*65
    	print ("    Router %s Creation Time Summary :"  % router_name)
        print "-"*65
        print "\n"
        print('   - Test Started Time   :\t %s' % (start_time.strftime(fmt)))
        print('   - Test Ended Time     :\t %s' % (end_time.strftime(fmt)))
        print "\n"
        
        for i in range(1, NETWORK_COUNT + 1):
            network_index = str(i)
            network_cidr = str(i) + "." + str(i) + "." + str(i) + ".0/24"
            test_data.append(create_network(tenant, router,
                             network_index, network_cidr))

        network_vlan = {}
        for entry in test_data:
            network_vlan[str(entry['network_data']['network_vlan_id'])] = \
                entry['network_data']['network_name']
        router_dict['network_vlan'] = network_vlan

        if ENABLE_ASR_VERIFICATION:
            vrf_router_id = router_id[:6]
            vrfname = "nrouter" + '-' + vrf_router_id + '-' + DEPLOYMENT_ID
            start_time = datetime.now(timezone('US/Pacific'))
            asr_verify_cmd = GetASRCmd(asr_host=ASR_HOST,
                                       asr_host_port=22,
                                       asr_user=ASR_USER,
                                       asr_password=ASR_PASSWORD,
                                       asr_slots=["0"])
            router_detail = {'vrfname': vrfname, 'interfaces': '',
                             'status': ''}
            ipnatpool_data = {'vrfname': vrfname, 'nat_pool_name': '',
                              'start_ip': '', 'end_ip': '', 'netmask': '',
                              'status': ''}
            iproute_data = {'vrfname': vrfname, 'prefix': '', 'mask': '',
                            'interface': '', 'next_hop_address': '',
                            'status': ''}
            interface_data = []
            nat_data = []
            try:
                router_detail = asr_verify_cmd.get_router_detail(vrfname)
                ipnatpool_data = asr_verify_cmd.get_ipnat_pool_detail(vrfname)
                iproute_data = asr_verify_cmd.get_iproute_detail(vrfname)
                for interface in router_detail['interfaces']:
                    interface_data.append(
                        asr_verify_cmd.get_network_interface_detail(vrfname,
                                                                    interface))
                for interface in interface_data:
                    interfaceid = DEPLOYMENT_ID + '_' + interface['vlan_id']
                    nat_data.append(
                        asr_verify_cmd.get_interface_nat_access_detail(
                            interface['vlan_id'], interfaceid))
                asr_report = True
                end_time = datetime.now(timezone('US/Pacific'))
                print "\n"
                print "-"*65
                print ("    ASR Functionality Verification Time Summary :")
                print "-"*65
                print "\n"
                print('   - Test Started Time   :\t %s' % (start_time.strftime(fmt)))
                print('   - Test Ended Time     :\t %s' % (end_time.strftime(fmt)))
                print "\n"

            except Exception as exc:
                print "\n"
                print "[ERROR] Caught exception on ASR Verification : %s" % \
                    (exc.message)
                print "\n"
                asr_report = False
            router_dict['router_detail'] = router_detail
            router_dict['ipnatpool_data'] = ipnatpool_data
            router_dict['iproute_data'] = iproute_data
            router_dict['interface_data'] = interface_data
            router_dict['nat_data'] = nat_data
        router_data.append(router_dict)
    print "=" * 50
    print "\n"
    print "Scale Test Deployment Completed"
    print "\n"

    print "*" * 80
    print "Scale Test Deployment OpenStack Report"
    print "*" * 80

    print "\n"
    print "           Tenant Creation Results      "
    print print_tenant_info(tenant_data)
    print "\n"
    print "           Router Creation Results      "
    print print_router_info(router_data)
    print "\n"
    print "                 Network Creation Results      "
    print print_network_info(test_data)
    print "\n"

    print "            Instance Creation Results      "
    print print_instance_info(test_data)
    print "\n"

    if ENABLE_ASR_VERIFICATION and asr_report:
        print "           OpenStack-ASR Router VRF Verification Results      "
        print asr_router_vrf_info(router_data)
        print "\n"

        print "           OpenStack-ASR IP NAT Pool Verification Results     "
        print asr_ipnat_pool_info(router_data)
        print "\n"
        print "           OpenStack-ASR IP Route Verification Results        "
        print asr_iproute_info(router_data)
        print "\n"

        print "           OpenStack-ASR Network VRF Verification Results     "
        print asr_network_vrf_info(router_data)
        print "\n"

        print("           OpenStack-ASR Network Interface's Dynamic NAT & "
              "Access list Entry Verification Results              ")
        print asr_interface_nat_info(router_data)
        print "\n"