Example #1
async def get_networks(data_store, amqp, node_id=None):
    ret = utils.get_objects(data_store,
                            amqp,
                            Network_schema,
                            utils.KEY_NETWORK,
                            node_id=node_id)
    raise web.HTTPOk(content_type="application/json", text=ret)
Example #2
    def add_nfs_datastore(self,
                          name,
                          remote_host,
                          remote_path,
                          read_only=False,
                          username=None,
                          password=None):

        # TODO: self.hosts
        # Get all hosts
        hosts = utils.get_objects(vimtypes=[vim.HostSystem])

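        # Describe the NAS volume: remote export location and local mount name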
        spec = vim.host.NasVolume.Specification()
        spec.remoteHost = remote_host
        spec.remotePath = remote_path
        spec.localPath = name

        if read_only:
            spec.accessMode = "readOnly"
        else:
            spec.accessMode = "readWrite"

        for host in hosts:
            # For each host add NAS datastore
            host.configManager.datastoreSystem.CreateNasDatastore(spec)
Example #3
async def get_l2_tunnels(data_store, amqp, node_id=None):
    ret = utils.get_objects(data_store,
                            amqp,
                            L2_tunnel_schema,
                            utils.KEY_L2_TUNNEL,
                            node_id=node_id)
    raise web.HTTPOk(content_type="application/json", text=ret)
async def get_ipsec_policies(data_store, amqp, node_id=None):
    ret = utils.get_objects(data_store,
                            amqp,
                            Ipsec_policy_schema,
                            utils.KEY_POLICY_IPSEC,
                            node_id=node_id)
    raise web.HTTPOk(content_type="application/json", text=ret)
Example #5
async def get_mptcp_proxies(data_store, amqp, node_id=None):
    ret = utils.get_objects(data_store,
                            amqp,
                            Mptcp_proxy_schema,
                            utils.KEY_MPTCP_PROXY,
                            node_id=node_id)
    raise web.HTTPOk(content_type="application/json", text=ret)
async def get_vpn_connections(data_store, amqp, node_id=None):
    ret = utils.get_objects(data_store,
                            amqp,
                            Vpn_connection_schema,
                            utils.KEY_CONNECTION,
                            node_id=node_id)
    raise web.HTTPOk(content_type="application/json", text=ret)
Example #7
async def get_expansions(data_store, amqp, node_id=None, network_id=None):
    if network_id:
        if not data_store.has((utils.KEY_NETWORK, network_id)):
            raise web.HTTPNotFound(text="Network Not Found")
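        # Only the expansions currently attached to this network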
        expansions = data_store.lookup_list((utils.KEY_IN_USE, network_id))
        ret = Expansion_schema().dumps(expansions, many=True).data
    else:
        ret = utils.get_objects(data_store,
                                amqp,
                                Expansion_schema,
                                utils.KEY_EXPANSION,
                                node_id=node_id)
    raise web.HTTPOk(content_type="application/json", text=ret)
Example #8
    def sample_latent(self,
                      input,
                      input_latent_mu,
                      input_latent_sigma,
                      pred_latent_mu,
                      pred_latent_sigma,
                      initial_pose_mu,
                      initial_pose_sigma,
                      masks,
                      sample=True):
        '''
        Samples latent variables given the pose vectors and sampled missing labels.
        '''
        latent = defaultdict(lambda: None)

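        # Sample per-step pose transitions, then integrate them into poses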
        beta = self.get_transitions(input_latent_mu, input_latent_sigma,
                                    pred_latent_mu, pred_latent_sigma, sample)
        pose = utils.accumulate_pose(beta)
        # Sample initial pose
        initial_pose = self.pyro_sample('initial_pose', dist.Normal,
                                        initial_pose_mu, initial_pose_sigma,
                                        sample)
        pose += initial_pose.view(-1, 1, self.n_components,
                                  self.pose_latent_size)

        if self.pedestrian:
            pose = utils.constrain_pose_pedestrian(pose, self.scale,
                                                   self.gamma_steps)
        else:
            pose = utils.constrain_pose(pose, self.scale, self.gamma_steps)

        # Get input objects
        input_pose = pose[:, :self.n_frames_input, :, :]

        input_obj = utils.get_objects(input, input_pose, self.n_components,
                                      self.object_size)
        # Encode the sampled objects
        appearance = self.encode_appearance_and_sample(input_obj, masks,
                                                       sample)

        latent.update({'pose': pose, 'appearance': appearance, 'mask': masks})
        return latent
Example #9
from torch.utils.data import DataLoader
from torch.optim import Adam
from tqdm import tqdm
from sklearn.model_selection import train_test_split

from dataset import TextDetectionDataset
from utils import get_images, get_objects
from model import SSD, MultiBoxLoss
from dataset import collate_fn

# 2. Dataloader
IMAGES_JSON_PATH = './data/train/images.json'
images = get_images(IMAGES_JSON_PATH)
OBJECTS_JSON_PATH = './data/train/objects.json'
objects = get_objects(OBJECTS_JSON_PATH)

BATCH_SIZE = 5

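# Hold out 30% of the samples for validation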
X_train, X_val, y_train, y_val = train_test_split(images,
                                                  objects,
                                                  test_size=0.3,
                                                  random_state=2021)

train_dataset = TextDetectionDataset(X_train, y_train)
val_dataset = TextDetectionDataset(X_val, y_val)

train_dataloader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              collate_fn=collate_fn)
val_dataloader = DataLoader(val_dataset,
                            batch_size=BATCH_SIZE,
                            collate_fn=collate_fn)
Example #10

args = ap.parse_args()
path_to_image, path_to_classifier, path_to_db, roi_height, dilate_kernel_size, erode_scale, v, vv = \
    args.image, args.classifier, args.db, args.rh, args.dk, args.es, args.v, args.vv
image = cv2.imread(path_to_image)

# Display the ROI to capture the target region
digits_region = utils.get_region(image, roi_height=roi_height)
# Split the region into individual digits
print('Segmenting...')

unified_digits = []

try:
    digits = utils.get_objects(digits_region, dilate_kernel_size=dilate_kernel_size, erode_scale=erode_scale,
                               display_results=bool(v or vv), display_intermediate=bool(vv))

    # Normalize the images so the classifier can interpret them
    unified_digits = np.array(list(map(lambda d: utils.get_unified_binary_image(d, (30, 30)), digits)))
    unified_digits = unified_digits.reshape((unified_digits.shape[0], 30, 30, 1))
except utils.DetectorError as err:
    print('Segmentation failed. Error: {}'.format(err.message))
    exit(1)

print('Found {} objects.'.format(len(unified_digits)))

if len(unified_digits) > 0:
    # Classify the digits
    print('Classifying...')

    classifier = load_model(path_to_classifier)
Example #11
    "x_max": ROI_X_MAX,
    "y_max": ROI_Y_MAX,
}

## Process image
if img_file_buffer is not None:
    pil_image = Image.open(img_file_buffer)

else:
    pil_image = Image.open(TEST_IMAGE)

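# Client for the DeepStack object-detection service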
dsobject = ds.DeepstackObject(DEEPSTACK_IP, DEEPSTACK_PORT, DEEPSTACK_API_KEY,
                              DEEPSTACK_TIMEOUT)

predictions = process_image(pil_image, dsobject)
objects = utils.get_objects(predictions, pil_image.width, pil_image.height)
all_objects_names = {obj["name"] for obj in objects}

# Filter objects for display
objects = [obj for obj in objects if obj["confidence"] > CONFIDENCE_THRESHOLD]
objects = [obj for obj in objects if obj["name"] in CLASSES_TO_INCLUDE]
objects = [
    obj for obj in objects if utils.object_in_roi(ROI_DICT, obj["centroid"])
]

# Draw object boxes
draw = ImageDraw.Draw(pil_image)
for obj in objects:
    name = obj["name"]
    confidence = obj["confidence"]
    box = obj["bounding_box"]
Example #12
def dump_mp3d_datasets(
        output_root,
        task='grid_fov_pretraining',
        task_root='',
        graph_root='',
        obj_dict_file='../data/vg_object_dictionaries.top100.matterport3d.json',
        image_list_file='../data/imagelist.matterport3d.txt',
        degree=45):
    '''Prepares and dumps the dataset to an npy file.'''
    if task_root != '' and not os.path.exists(task_root):
        try:
            os.makedirs(task_root)
        except OSError:
            print('Cannot create folder {}'.format(task_root))
            quit(1)

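    # Load viewpoint-to-split mapping and the Visual Genome object vocabulary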
    dicts = get_mp3d_dictionaries()
    with open(obj_dict_file, 'r') as f:
        vg2idx = json.load(f)['vg2idx']

    data_list = defaultdict(list)
    data = []

    image_list = [line.strip().split('.')[0] for line in open(image_list_file)]
    pbar = tqdm(image_list)

    for ii, pano in enumerate(pbar):
        splits = dicts['viewpoint2split'][pano]
        if task == 'grid_fov_pretraining' and task_root != '':
            grid_nodes, _ = generate_grid(degree=degree)
            node_path = os.path.join(graph_root, '{}.npy'.format(pano))
            nodes = np.load(node_path, allow_pickle=True)[()]

            for n in grid_nodes:
                node = grid_nodes[n]

                mdatum = {}
                fov_id = node['id']
                mdatum['fov_id'] = fov_id

                mdatum['pano'] = pano

                lat, lng = node['lat'], node['lng']
                mx, my = node['x'], node['y']
                mdatum['latitude'] = lat
                mdatum['longitude'] = lng
                mdatum['x'] = mx
                mdatum['y'] = my

                mdatum['refexps'] = []
                fov_file = os.path.join(task_root,
                                        '{}.fov{}.jpg'.format(pano, fov_id))

                mdatum['fov_file'] = fov_file
                regions, obj_list = get_objects(mx, my, nodes, vg2idx)
                mdatum['regions'] = regions
                mdatum['obj_list'] = obj_list

                directions = [
                    len(obj_list['canonical'][d]) > 0
                    for d in ['up', 'down', 'left', 'right']
                ]

                if any(directions):
                    for split in splits:
                        data_list[split].append(mdatum)
        else:
            raise NotImplementedError()

    pbar.close()

    for split in R2R_SPLIT_NAMES:
        output_file = os.path.join(output_root,
                                   '{}.[all].imdb.npy'.format(split))

        print('Dumping {} instances to {}'.format(len(data_list[split]),
                                                  output_file))
        np.save(open(output_file, 'wb'), {
            'data_list': [data_list[split]],
            'sentences': []
        })
Example #13
def dump_td_datasets(splits,
                     output_file,
                     task='continuous_grounding',
                     task_root='',
                     graph_root='',
                     full_w=3000,
                     full_h=1500,
                     obj_dict_file='../data/vg_object_dictionaries.all.json',
                     degree=30,
                     cache_root='../data/cached_td_data30degrees/'):  # FIX
    '''Prepares and dumps the dataset to an npy file.'''

    if task_root != '' and not os.path.exists(task_root):
        try:
            os.makedirs(task_root)
        except OSError:
            print('Cannot create folder {}'.format(task_root))
            quit(1)

    if cache_root:
        meta_file = os.path.join(cache_root, 'meta.npy')
        meta = np.load(meta_file, allow_pickle=True)[()]
        cached_nodes = meta['nodes']
        cached_paths = meta['paths']
        add_cached_path = True
    else:
        raise NotImplementedError()

    with open(obj_dict_file, 'r') as f:
        vg2idx = json.load(f)['vg2idx']

    data_list = []

    all_sentences = []

    stats = defaultdict(int)
    for split in splits:
        data = []

        td_data_file = '../data/td_data/{}.json'.format(split)
        lines = [(ii, line) for ii, line in enumerate(open(td_data_file))]
        pbar = tqdm(lines)
        count_err = 0
        for ii, line in pbar:
            datum = {}

            instance = {}
            td_instance = json.loads(line)

            instance['img_idx'] = td_instance['main_pano']
            instance['img_cat'] = 'street'
            instance['img_loc'] = 'outdoor'
            instance['img_src'] = '../data/td_data/images/' + \
                td_instance['main_pano'] + '.jpg'
            instance['annotationid'] = td_instance['route_id']
            center = json.loads(td_instance['main_static_center'])
            gt_x, gt_y = int(center['x'] * full_w), int(center['y'] * full_h)

            xlng_deg, ylat_deg = coordinate2degrees(gt_x,
                                                    gt_y,
                                                    full_w=full_w,
                                                    full_h=full_h)
            instance['xlng_deg'], instance['ylat_deg'] = xlng_deg, ylat_deg
            instance['refexp'] = [td_instance['td_location_text'].split(' ')]

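            # One FoV node per 'degree' degrees of heading; start at a random one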
            n_rows = int(360 / degree)
            start_fov = np.random.randint(n_rows)
            start_node = cached_nodes[start_fov]
            assert start_node['idx'] == start_fov
            instance['actions'] = [{
                'act_deg_list': [[[start_node['lng'], start_node['lat']],
                                  [xlng_deg, ylat_deg]]],
                'actionid':
                instance['annotationid']
            }]
            all_moves = [[(start_node['lng'], start_node['lat']),
                          (xlng_deg, ylat_deg)]]

            xlongitude, ylatitude = instance['xlng_deg'], instance['ylat_deg']

            datum['annotationid'] = instance['annotationid']
            datum['gt_lng'] = xlongitude
            datum['gt_lat'] = ylatitude
            datum['gt_x'] = gt_x
            datum['gt_y'] = gt_y

            img_cat = instance['img_cat']
            img_loc = instance['img_loc']
            img_src = instance['img_src']

            datum['img_src'] = img_src
            datum['img_category'] = img_cat
            stats[img_loc] += 1
            stats[img_cat] += 1

            sent_queue = []
            sentences = instance['refexp']
            for refexp in sentences:
                sent_queue += [refexp]

            datum['gt_moves'] = all_moves
            datum['refexps'] = sentences
            all_sentences += sent_queue

            start_loc = instance['actions'][0]['act_deg_list'][0][0]
            start_x, start_y = get_coordinates(start_loc[0],
                                               start_loc[1],
                                               full_w=full_w,
                                               full_h=full_h)
            start_fov, _ = get_nearest(cached_nodes, start_x, start_y)
            gt_path = [start_fov]
            path = []
            intermediate_paths = []

            if add_cached_path:
                for kk, act_list in enumerate(
                        instance['actions'][0]['act_deg_list']):
                    act = act_list[-1]
                    lng, lat = act
                    x, y = get_coordinates(lng,
                                           lat,
                                           full_w=full_w,
                                           full_h=full_h)

                    min_n, _ = get_nearest(cached_nodes, x, y)
                    gt_path.append(min_n)

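                # Expand consecutive gt nodes into a full path using cached shortest paths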
                path = [gt_path[0]]
                for kk in range(len(gt_path) - 1):
                    start = gt_path[kk]
                    end = gt_path[kk + 1]
                    intermediate_path = cached_paths[start][end]
                    path += intermediate_path[1:]
                    intermediate_paths.append(intermediate_path)
                assert len(gt_path) <= len(
                    path), 'len(gt_path) > len(path): {} > {}'.format(
                        len(gt_path), len(path))
                assert len(datum['refexps']) == len(
                    intermediate_paths
                ), 'len(refexps) != len(intermediate_paths): {} != {}'.format(
                    len(datum['refexps']), len(intermediate_paths))
                datum['gt_path'] = gt_path
                datum['path'] = path
                datum['intermediate_paths'] = intermediate_paths
                datum['actionid'] = instance['actions'][0]['actionid']
                datum['actions'] = instance['actions']
            if task == 'continuous_grounding':
                data.append(datum)
            elif task == 'cached_fov_pretraining':
                pano = instance['img_idx']
                node_path = os.path.join(graph_root, '{}.npy'.format(pano))
                nodes = np.load(node_path, allow_pickle=True)[()]

                for n in cached_nodes:
                    node = cached_nodes[n]
                    mdatum = {}
                    fov_id = node['idx']
                    mdatum['fov_id'] = fov_id

                    mdatum['move_max'] = len(sentences)
                    mdatum['img_src'] = datum['img_src']
                    # mdatum['actionid'] = move_id
                    mdatum['annotationid'] = instance['annotationid']

                    ylat, xlng = node['lat'], node['lng']
                    mx, my = node['x'], node['y']
                    mdatum['xlongitude'] = xlng
                    mdatum['ylatitude'] = ylat
                    mdatum['x'] = mx
                    mdatum['y'] = my

                    mdatum['refexps'] = sentences
                    fov_file = os.path.join(
                        cache_root, 'fovs', task_root,
                        'pano_{}.{}.jpg'.format(pano, fov_id))

                    mdatum['fov_file'] = fov_file
                    regions, obj_list = get_objects(mx,
                                                    my,
                                                    nodes,
                                                    vg2idx,
                                                    full_w=full_w,
                                                    full_h=full_h,
                                                    include_vectors=False)
                    mdatum['regions'] = regions
                    mdatum['obj_list'] = obj_list

                    directions = [
                        len(obj_list['navigation'][d])
                        for d in obj_list['navigation'].keys()
                    ]

                    if sum(directions) > 0:
                        data.append(mdatum)

        data_list.append(data)
        print('{} instances have errors'.format(count_err))
        pbar.close()

    n_instances = sum([len(l) for l in data_list])
    print('Dumping {} instances to {}'.format(n_instances, output_file))
    np.save(open(output_file, 'wb'), {
        'data_list': data_list,
        'sentences': all_sentences
    })