Example #1
def areDoorsInRoom2021(level):
    level_doorfix = level.copy()
    # for each room in the level, check every door; 
    for room in level_doorfix['rooms']:
        # inDatabase Check: 
        for o in room['objList']:
            if o['modelId'] in sk_to_ali or o['modelId'] in suncg:
                o['inDatabase'] = True
            else:
                o['inDatabase'] = False
        if not os.path.exists('room/{}/{}f.obj'.format(room['origin'], room['modelId'])):
            continue
        try:
            room_meta = p2d('.', 'room/{}/{}f.obj'.format(room['origin'], room['modelId']))
            room_polygon = Polygon(room_meta[:, 0:2]) # requires python library 'shapely'
        except Exception as e:
            print(e)
            continue
        for r in level['rooms']: # scan every object in the level for doors touching this room; 
            for obj in r['objList']:
                if obj is None:
                    continue
                if 'coarseSemantic' not in obj:
                    continue
                if obj['coarseSemantic'] not in ['door', 'Door']:
                    continue
                block = windoorblock_f(obj)
                block_polygon = Polygon(block['windoorbb']).buffer(.03)
                # for now, we do not duplicate doors; instead, we append roomIds to the obj. 
                if room_polygon.intersects(block_polygon):
                    if 'roomIds' not in obj:
                        obj['roomIds'] = []
                    obj['roomIds'].append(room['roomId'])
    return level_doorfix
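The room-door test above reduces to a buffered polygon intersection. A minimal, self-contained sketch of that core step, assuming shapely is installed; the room outline and door box are hypothetical stand-ins for room_meta and the output of windoorblock_f:

from shapely.geometry import Polygon

# a hypothetical 4m x 3m rectangular room footprint in the XZ plane; 
room_polygon = Polygon([(0, 0), (4, 0), (4, 3), (0, 3)])
# a hypothetical door bounding box sitting flush on the wall at x = 4; 
door_bb = Polygon([(3.95, 1.0), (4.05, 1.0), (4.05, 2.0), (3.95, 2.0)])
# the 3cm buffer tolerates doors that only touch a wall segment; 
if room_polygon.intersects(door_bb.buffer(.03)):
    print('this door belongs to the room')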
Example #2
def fa_reshuffle(rj):
    pend_obj_list = []
    final_obj_list = []
    bbindex = []
    ol = rj['objList']
    print("Total Number of objects: ", len(ol))
    for o in ol:
        if o is None:
            continue
        if o['modelId'] not in obj_semantic:
            final_obj_list.append(o)
            continue
        if 'coarseSemantic' in o:
            if o['coarseSemantic'] in BANNED:
                final_obj_list.append(o)
                continue
        bbindex.append(name_to_ls[o['modelId']])
        pend_obj_list.append(o)
    room_meta = p2d(
        '.', '/suncg/room/{}/{}f.obj'.format(rj['origin'], rj['modelId']))
    room_polygon = Polygon(room_meta[:, 0:2])
    room_shape = torch.from_numpy(room_meta[:, 0:2]).float()
    room_shape_norm = torch.from_numpy(room_meta).float()
    for o in pend_obj_list:
        disturbance(o, 0.5, room_polygon)
    for o in pend_obj_list:
        o['rotate'][0] = 0.0
        o['rotate'][1] = o['orient']
        o['rotate'][2] = 0.0
    return rj
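disturbance is defined elsewhere in the repo; a possible minimal implementation, assuming it jitters an object's XZ translation while keeping it inside the room polygon (the signature and behavior are inferred from the call above):

import random
from shapely.geometry import Point, Polygon

def disturbance(o, radius, room_polygon):
    # jitter the XZ translation, retrying until the new point stays inside the room; 
    x, _, z = o['translate']
    for _ in range(100):
        nx = x + random.uniform(-radius, radius)
        nz = z + random.uniform(-radius, radius)
        if room_polygon.contains(Point(nx, nz)):
            o['translate'][0] = nx
            o['translate'][2] = nz
            return

room = Polygon([(0, 0), (4, 0), (4, 3), (0, 3)])
o = {'translate': [2.0, 0.0, 1.5]}
disturbance(o, 0.5, room)
print(o['translate'])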
Example #3
def floorplanOrthes():
    pt.cameraType = 'orthographic'
    pt.SAVECONFIG = False
    pt.REMOVELAMP = True
    floorplanlist = os.listdir('./dataset/alilevel_door2021/')
    # for floorplanfile in floorplanlist:
    for floorplanfile in [
            'e8b0a6bf-58a2-49de-b9ea-231995fc9e3b.json',
            '317d64ff-b96e-4743-88f6-2b5b27551a7c.json'
    ]:
        if '.json' not in floorplanfile:
            continue
        with open(f'./dataset/alilevel_door2021/{floorplanfile}') as f:
            scenejson = json.load(f)
        # if os.path.exists(f"./dataset/alilevel_door2021_orth/{scenejson['origin']}.png"):
        #     continue
        points = []
        for room in scenejson['rooms']:
            try:
                floorMeta = p2d(
                    '.',
                    '/dataset/room/{}/{}f.obj'.format(room['origin'],
                                                      room['modelId']))
                points += floorMeta[:, 0:2].tolist()
                wallMeta = sk.getMeshVertices(
                    '/dataset/room/{}/{}w.obj'.format(room['origin'],
                                                      room['modelId']))
                points += wallMeta[:, [0, 2]].tolist()
            except Exception:
                continue
        v = np.array(points)
        l = np.min(v[:, 0])
        r = np.max(v[:, 0])
        u = np.min(v[:, 1])
        d = np.max(v[:, 1])
        # orthViewLen = max((r - l), (d - u)) + 0.45
        orthViewLen = (r - l) + 0.45
        scenejson["PerspectiveCamera"] = {}
        scenejson["PerspectiveCamera"]['origin'] = [(r + l) / 2, 50,
                                                    (d + u) / 2]
        scenejson["PerspectiveCamera"]['target'] = [(r + l) / 2, 0,
                                                    (d + u) / 2]
        scenejson["PerspectiveCamera"]['up'] = [0, 0, 1]
        scenejson["OrthCamera"] = {}
        scenejson["OrthCamera"]['x'] = orthViewLen / 2
        scenejson["OrthCamera"]['y'] = orthViewLen / 2
        scenejson["canvas"] = {}
        scenejson['canvas']['width'] = int((r - l) * 100)
        scenejson['canvas']['height'] = int((d - u) * 100)
        print(f'Rendering {floorplanfile} ...')
        try:
            pt.pathTracing(
                scenejson, 64,
                f"./dataset/alilevel_door2021_orth/{scenejson['origin']}.png")
        except Exception as e:
            print(e)
            continue
    # swap the cameraType back to perspective cameras.
    pt.cameraType = 'perspective'
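The camera setup above reduces to an axis-aligned bounding box over all collected floor and wall vertices. A self-contained sketch of that step with hypothetical points:

import numpy as np

points = [[0.0, 0.0], [5.2, 0.0], [5.2, 4.1], [0.0, 4.1]]  # hypothetical vertices
v = np.array(points)
l, r = np.min(v[:, 0]), np.max(v[:, 0])
u, d = np.min(v[:, 1]), np.max(v[:, 1])
center = [(r + l) / 2, (d + u) / 2]                # the top-down camera looks at this point
orthViewLen = (r - l) + 0.45                       # view extent with a 0.45 margin
canvas = (int((r - l) * 100), int((d - u) * 100))  # 100 pixels per scene unit
print(center, orthViewLen, canvas)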
Example #4
def mageAddAuto():
    rj = request.json
    if 'auxiliaryDomObj' in rj:
        room_meta = rj['auxiliaryDomObj']['room_meta']
    else:
        room_meta = p2d('.', f'/dataset/room/{rj["origin"]}/{rj["modelId"]}f.obj')[:, 0:2] # keep the XZ columns so Polygon() receives 2D coordinates, matching the cached room_meta; 
    samples = random_points_within(Polygon(room_meta), 1000)
    print(samples[0])
    return json.dumps(samples)
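random_points_within is not shown; a common rejection-sampling implementation over the polygon's bounding box, offered here as an assumption of what the helper does:

import random
from shapely.geometry import Point, Polygon

def random_points_within(poly, num_points):
    # sample uniformly in the bounding box and keep only points inside the polygon; 
    min_x, min_y, max_x, max_y = poly.bounds
    points = []
    while len(points) < num_points:
        p = Point(random.uniform(min_x, max_x), random.uniform(min_y, max_y))
        if poly.contains(p):
            points.append([p.x, p.y])
    return points

print(random_points_within(Polygon([(0, 0), (4, 0), (4, 3), (0, 3)]), 3))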
Example #5
def alilevel2room():
    AREA = {}
    levelnames = os.listdir('./dataset/alilevel_windoorFix')
    for levelname in levelnames:
        with open(f'./dataset/alilevel_windoorFix/{levelname}') as f:
            level = json.load(f)
        for room in level['rooms']:
            if len(room['roomTypes']) == 0:
                roomtype = 'Unknown'
            else:
                roomtype = room['roomTypes'][0]
            newlevel = level.copy()
            newlevel['rooms'] = [room]
            if len(room['objList']) != 0:
                newlevel['bbox'] = room['bbox']
            newlevel['rooms'][0]['roomId'] = 0
            try:
                newlevel['rooms'][0]['area'] = Polygon(
                    p2d(f'./dataset/room/{newlevel["origin"]}',
                        f'{newlevel["rooms"][0]["modelId"]}f.obj')).area
            except Exception as e:
                newlevel['rooms'][0]['area'] = 0.0
                print(e)
            for o in newlevel['rooms'][0]['objList']:
                o['roomId'] = 0
                if o['modelId'] in sk_to_ali or o['modelId'] in suncg:
                    o['inDatabase'] = True
                else:
                    o['inDatabase'] = False
            if not os.path.exists(f'./dataset/alilevel_inRooms/{roomtype}'):
                os.makedirs(f'./dataset/alilevel_inRooms/{roomtype}')
            if roomtype not in AREA:
                AREA[roomtype] = {}
            samefileindex = 0
            finalfilename = f'./dataset/alilevel_inRooms/{roomtype}/{roomtype}-{newlevel["origin"]}-{samefileindex}.json'
            while os.path.exists(finalfilename):
                samefileindex += 1
                finalfilename = f'./dataset/alilevel_inRooms/{roomtype}/{roomtype}-{newlevel["origin"]}-{samefileindex}.json'
            with open(finalfilename, 'w') as f:
                json.dump(newlevel, f)
            AREA[roomtype][
                f'{roomtype}-{newlevel["origin"]}-{samefileindex}'] = newlevel[
                    'rooms'][0]['area']
    with open('./dataset/AREA.json', 'w') as f:
        json.dump(AREA, f)
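The per-room splitting relies on dict.copy() being shallow: rebinding newlevel['rooms'] leaves the original level intact, while mutating a shared room dict is visible through both copies. A minimal illustration:

level = {'origin': 'abc', 'rooms': [{'modelId': 'r0'}, {'modelId': 'r1'}]}
newlevel = level.copy()
newlevel['rooms'] = [level['rooms'][0]]   # rebinding: level['rooms'] still has two rooms
newlevel['rooms'][0]['roomId'] = 0        # mutation: visible through both dicts
print(len(level['rooms']), level['rooms'][0]['roomId'])  # 2 0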
Example #6
def balancing(h, room, theta):
    """
    'h' is a generated probe view. 
    """
    h['direction'] /= np.linalg.norm(h['direction'])
    floorMeta = p2d(
        '.', '/dataset/room/{}/{}f.obj'.format(room['origin'],
                                               room['modelId']))
    onePlanePointList = []
    for obj in room['objList']:
        if not isObjectInSight(obj, h['probe'], h['direction'], floorMeta,
                               theta, room['objList'], False):
            continue
        probeTot = np.array(obj['translate']) - h['probe']
        cosToDirection = np.dot(probeTot,
                                h['direction']) / np.linalg.norm(probeTot)
        DIS = 1 / cosToDirection
        DRC = probeTot / np.linalg.norm(probeTot)
        onePlanePointList.append(h['probe'] + DIS * DRC)
    if len(onePlanePointList) == 0:
        # no object is in sight: keep the original direction to avoid a ZeroDivisionError; 
        return h['direction']
    centroid = sum(onePlanePointList) / len(onePlanePointList)
    newDirection = centroid - h['probe']
    newDirection /= np.linalg.norm(newDirection, ord=2)
    return newDirection
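Each visible object is projected onto the plane one unit ahead of the probe along the view direction: with t = center - probe, the depth along the view is cos = t.d / |t|, so stepping 1 / cos along the unit ray t / |t| lands exactly on that plane. A numeric check of this identity:

import numpy as np

probe = np.array([0., 1.5, 0.])
d = np.array([0., 0., 1.])                  # unit view direction
t = np.array([1., 1.5, 2.]) - probe         # probe-to-object vector
cos = np.dot(t, d) / np.linalg.norm(t)
p = probe + (1 / cos) * (t / np.linalg.norm(t))
print(np.dot(p - probe, d))                 # 1.0: p lies on the unit-depth plane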
Example #7
def autoViewsRodrigues(room, scene):
    # convert fov/2 to radians.
    theta = (np.pi * scene['PerspectiveCamera']['fov'] / 180) / 2
    # get the floor meta.
    floorMeta = p2d(
        '.', '/dataset/room/{}/{}f.obj'.format(room['origin'],
                                               room['modelId']))
    floorPoly = Polygon(floorMeta[:, 0:2])
    # the height of the wall.
    H = sk.getWallHeight(
        f"./dataset/room/{room['origin']}/{room['modelId']}w.obj")
    pcams = []
    for wallIndex in range(floorMeta.shape[0]):
        pcam = {}
        wallIndexNext = (wallIndex + 1) % floorMeta.shape[0]
        middlePoint = (floorMeta[wallIndex][0:2] +
                       floorMeta[wallIndexNext][0:2]) / 2
        middlePoint += floorMeta[wallIndex][2:4] * 0.005
        origin = middlePoint.tolist()
        origin.insert(1, H / 2)
        direction = floorMeta[wallIndex][2:4].tolist()
        direction.insert(1, 0.)
        origin = np.array(origin)
        direction = np.array(direction)

        pcam['theta'] = theta
        pcam['roomId'] = room['roomId']
        # pcam['viewLength'] = np.linalg.norm(middlePoint - p, ord=2)
        pcam['probe'] = origin
        pcam['wallIndex'] = wallIndex
        pcam['direction'] = groundShifting(origin, floorMeta, floorPoly,
                                           direction, theta, H)
        pcam['type'] = 'againstMidWall'
        pcams.append(pcam)

    return pcams
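Each camera sits at a wall midpoint at half the wall height, nudged slightly inward along the wall normal, and looks along that normal. A standalone sketch of the per-wall geometry, assuming floorMeta rows are [x, z, nx, nz] (consistent with the slicing above) and a hypothetical wall height H = 2.6:

import numpy as np

# a 4m x 3m room, counter-clockwise, with inward wall normals; 
floorMeta = np.array([[0, 0, 0, 1], [4, 0, -1, 0], [4, 3, 0, -1], [0, 3, 1, 0]], dtype=float)
H = 2.6
i = 0
j = (i + 1) % floorMeta.shape[0]
middle = (floorMeta[i][0:2] + floorMeta[j][0:2]) / 2 + floorMeta[i][2:4] * 0.005
origin = [middle[0], H / 2, middle[1]]
direction = [floorMeta[i][2], 0.0, floorMeta[i][3]]
print(origin, direction)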
Example #8
def sceneSynthesis(rj):
    print(rj['origin'])
    start_time = time.time()
    pend_obj_list = []
    bbindex = []
    blocks = []
    random.shuffle(rj['objList'])
    # bounding boxes of given furniture objects; 
    boundingboxes = []
    max_points = []
    min_points = []
    # identifying objects to arrange; 
    for o in rj['objList']:
        if o is None:
            print('this is a None object; ')
            continue
        if 'coarseSemantic' not in o:
            print('a given object does not have coarseSemantic; ')
            continue
        if o['coarseSemantic'] in BANNED:
            print('a given object is not furniture; ')
            continue
        if o['coarseSemantic'] == 'door' or o['coarseSemantic'] == 'window':
            blocks.append(windoorblock_f(o))
            continue
        # if o['modelId'] not in obj_semantic:
        #     print(f'a given object {o["modelId"]} is not a furniture;' )
        #     continue
        # bbindex.append(name_to_ls[o['modelId']])
        try:
            boundingboxes.append(load_boundingbox(o['modelId']))
        except Exception as e:
            continue
        aabb = load_AABB(o['modelId'])
        max_points.append(aabb['max'])
        min_points.append(aabb['min'])
        o['childnum'] = {}
        o['myparent'] = None
        pend_obj_list.append(o)
    # load priors; 
    csrrelation = torch.zeros((len(pend_obj_list), len(pend_obj_list)), dtype=torch.float)
    for center in pend_obj_list:
        for obj in pend_obj_list:
            preload_prior(center['modelId'], obj['modelId'])
    for centerid in range(len(pend_obj_list)):
        center = pend_obj_list[centerid]
        for objid in range(len(pend_obj_list)):
            if objid == centerid:
                continue
            obj = pend_obj_list[objid]
            # if the obj already has a parent, we have to continue, 
            # because if multiple parents were allowed, two parents might share the same child while another child has no parent; 
            if obj['myparent'] is not None:
                continue
            pid = "{}-{}".format(center['modelId'], obj['modelId'])
            if pid in priors['pos']:
                if obj['modelId'] not in center['childnum']:
                    center['childnum'][obj['modelId']] = 0
                if center['childnum'][obj['modelId']] >= priors['chainlength'][pid]:
                    continue
                csrrelation[centerid, objid] = 1.
                obj['myparent'] = center
                center['childnum'][obj['modelId']] += 1
    # partition coherent groups; 
    pend_groups = connected_component(np.arange(len(pend_obj_list)), csrrelation)
    cgs = []
    for pend_group in pend_groups:
        cg = {}
        cg['objList'] = [pend_obj_list[i] for i in pend_group]
        cg['csrrelation'] = csrrelation[pend_group][:, pend_group]
        cg['translate'] = [0.0, 0.0, 0.0]
        cg['orient'] = 0.0
        # determine layouts of each group; 
        heuristic(cg)
        # the following code is for chain pattern; 
        # if only one object exists in a cg && the object follows chain layout, 
        # e.g., kitchen cabinet, shelving, etc; 
        if len(cg['objList']) == 1 and cg['objList'][0]['coarseSemantic'] in NaiveChainList:
            cg['chain'] = cg['objList'][0]['coarseSemantic']
        else:
            cg['chain'] = 'n'
        # special case: lift model '781' off the floor if it still sits at y = 0; 
        if cg['objList'][cg['leaderID']]['modelId'] in ['781'] and cg['objList'][cg['leaderID']]['translate'][1] == 0:
            cg['objList'][cg['leaderID']]['translate'][1] = 1.04
        cgs.append(cg)
    # load and process room shapes; 
    room_meta = p2d('.', '/dataset/room/{}/{}f.obj'.format(rj['origin'], rj['modelId']))
    room_polygon = Polygon(room_meta[:, 0:2]) # requires python library 'shapely'
    translate = torch.zeros((len(pend_obj_list), 3)).float()
    orient = torch.zeros((len(pend_obj_list))).float()
    scale = torch.zeros((len(pend_obj_list), 3)).float()
    for i in range(len(pend_obj_list)):
        translate[i][0] = pend_obj_list[i]['translate'][0]
        translate[i][1] = pend_obj_list[i]['translate'][1]
        translate[i][2] = pend_obj_list[i]['translate'][2]
        orient[i] = pend_obj_list[i]['orient']
        scale[i][0] = pend_obj_list[i]['scale'][0]
        scale[i][1] = pend_obj_list[i]['scale'][1]
        scale[i][2] = pend_obj_list[i]['scale'][2]
    # bb = four_points_xz[bbindex].float()
    # max_points = max_bb[bbindex].float()
    # min_points = min_bb[bbindex].float()
    bb = torch.tensor(boundingboxes).float()
    max_points = torch.tensor(max_points).float()
    min_points = torch.tensor(min_points).float()
    for i in range(len(pend_obj_list)):
        bb[i] = rotate_bb_local_para(bb[i], orient[i], scale[i][[0, 2]])
        max_points[i] = transform_a_point(max_points[i], translate[i], orient[i], scale[i])
        min_points[i] = transform_a_point(min_points[i], translate[i], orient[i], scale[i])
    bb_tran = translate.reshape(len(pend_obj_list), 1, 3)[:, :, [0, 2]] + bb # note that bbs are around (0,0,0) after heuristic(cg)
    # calculate bounding box of coherent groups; 
    for gid in range(len(pend_groups)):
        pend_group = pend_groups[gid]
        cg = cgs[gid]
        points = bb_tran[pend_group].reshape(-1, 2)
        max_points_of_cg = max_points[pend_group]
        min_points_of_cg = min_points[pend_group]
        maxp = torch.max(points, dim=0)[0]
        minp = torch.min(points, dim=0)[0]
        cg['bb'] = torch.zeros((4, 2), dtype=torch.float)
        cg['bb'][0] = maxp
        cg['bb'][1][0] = minp[0]
        cg['bb'][1][1] = maxp[1]
        cg['bb'][2] = minp
        cg['bb'][3][0] = maxp[0]
        cg['bb'][3][1] = minp[1]
        cg['height'] = torch.max(max_points_of_cg, dim=0)[0][1].item()
        cg['ground'] = torch.min(min_points_of_cg, dim=0)[0][1].item()
    # generate
    attempt_heuristic(cgs, room_meta, blocks)
    for cg in cgs:
        # shift each coherent group slightly along its facing direction; 
        cg['translate'][0] += np.sin(cg['orient']) * 0.08
        cg['translate'][2] += np.cos(cg['orient']) * 0.08
        # if cg['objList'][cg['leaderID']]['coarseSemantic'] in ['sink', 'dressing_table', 'picture_frame', 'television', 'mirror', 'clock', 'dining_table']:
        #     cg['translate'][0] += np.sin(cg['orient']) * 0.08
        #     cg['translate'][2] += np.cos(cg['orient']) * 0.08
        for o in cg['objList']:
            o['translate'][0], o['translate'][2] = rotate([0, 0], [o['translate'][0], o['translate'][2]], cg['orient'])
            o['orient'] += cg['orient']
            o['rotate'][0] = 0.0
            o['rotate'][1] = o['orient']
            o['rotate'][2] = 0.0
            o['translate'][0] += cg['translate'][0]
            o['translate'][1] += cg['translate'][1]
            o['translate'][2] += cg['translate'][2]
    # log coherent groups; 
    for i in range(len(pend_obj_list)):
        o = pend_obj_list[i]
        if 'coarseSemantic' not in o:
            break
        print(o['modelId'], o['coarseSemantic'])
        for j in range(len(pend_obj_list)):
            if csrrelation[i][j] == 1.0:
                print("--->>>", pend_obj_list[j]['modelId'], pend_obj_list[j]['coarseSemantic'])
    print("\r\n--- %s secondes ---" % (time.time() - start_time))
    return rj
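connected_component is defined elsewhere in the repo; a plausible stand-in consistent with how it is called above, treating the parent-child matrix as an undirected graph and collecting components by BFS (the exact behavior is an assumption):

import numpy as np
import torch

def connected_component(indices, rel):
    # rel: (n, n) 0/1 tensor; symmetrize it and BFS each component; 
    n = len(indices)
    adj = (rel + rel.t()) > 0
    seen, groups = set(), []
    for s in range(n):
        if s in seen:
            continue
        comp, queue = [], [s]
        seen.add(s)
        while queue:
            u = queue.pop()
            comp.append(indices[u])
            for v in range(n):
                if adj[u][v] and v not in seen:
                    seen.add(v)
                    queue.append(v)
        groups.append(comp)
    return groups

rel = torch.zeros((3, 3))
rel[0, 1] = 1.  # objects 0 and 1 are linked, object 2 stands alone
print(connected_component(np.arange(3), rel))  # [[0, 1], [2]]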
Example #9
def priors_of_roomShape():
    rj = request.json
    existingCatList = []
    for obj in rj['objList']:
        if obj is None:
            continue
        if 'modelId' not in obj:
            continue
        if obj['modelId'] not in objCatList:
            continue
        if len(objCatList[obj['modelId']]) == 0:
            continue
        if objCatList[obj['modelId']][0] not in existingCatList:
            existingCatList.append(objCatList[obj['modelId']][0])
    existingPendingCatList = existingCatList.copy()
    res = {'object': [], 'prior': [], 'index': [], 'coarseSemantic': {}, 'catMask': []}
    if 'auxiliaryDomObj' in rj:
        if 'heyuindex' in rj['auxiliaryDomObj']:
            res['heyuindex'] = rj['auxiliaryDomObj']['heyuindex']
        for objname in rj['auxiliaryDomObj']['object']:
            if objCatList[objname][0] not in existingPendingCatList:
                existingPendingCatList.append(objCatList[objname][0])
    # print(existingCatList)
    # print(existingPendingCatList)
    # load and process room shapes; 
    room_meta = p2d('.', f'/dataset/room/{rj["origin"]}/{rj["modelId"]}f.obj')
    room_meta = room_meta[:, 0:2]
    wallSecIndices = np.arange(1, len(room_meta)).tolist() + [0]
    res['room_meta'] = room_meta.tolist()
    rv = room_meta[:] - room_meta[wallSecIndices]
    normals = rv[:, [1,0]]
    normals[:, 1] = -normals[:, 1]
    res['room_orient'] = np.arctan2(normals[:, 0], normals[:, 1]).tolist()
    res['room_oriNormal'] = normals.tolist()
    # room_polygon = Polygon(room_meta[:, 0:2]) # requires python library 'shapely'
    # currently, we hard-code a few available coherent groups...
    roomTypeSuggestedList = []
    categoryList = []
    for rt in rj['roomTypes']:
        if 'heyuindex' not in res:
            res['heyuindex'] = np.random.randint(len(roomTypeDemo[rt]))
        categoryList += roomTypeDemo[rt][res['heyuindex']]
        break
    for cat in categoryList:
        if cat in existingCatList:
            continue
        roomTypeSuggestedList.append(random.choice(objListCat[cat]))
    if 'auxiliaryDomObj' not in rj: # if this is the first time calling this function for the pending room...
        res['object'] = roomTypeSuggestedList
    else:
        # replenish the pending object list; 
        for newobjname in roomTypeSuggestedList:
            if objCatList[newobjname][0] in existingPendingCatList:
                continue
            rj['auxiliaryDomObj']['object'].append(newobjname)
        res['object'] = rj['auxiliaryDomObj']['object'].copy()
        for objname in rj['auxiliaryDomObj']['object']:
            # if an object of this category is already in the room; 
            if objCatList[objname][0] in existingCatList: 
                res['object'].remove(objname)

    if len(res['object']) == 0: # if every suggested category already exists in the room, nothing is left to recommend; 
        pass

    # load wall priors; 
    for obj in res['object']:
        with open(f'./latentspace/wdot-4/{obj}.json') as f:
            wallpri = json.load(f)
            res['prior'] += wallpri
            res['index'] += np.full(len(wallpri), obj).tolist()
            res['catMask'] += np.full(len(wallpri), categoryCodec[getobjCat(obj)]).tolist()
    for newobjname in res['object']:
        res['coarseSemantic'][newobjname] = getobjCat(newobjname)
    return json.dumps(res)
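The wall orientations above come from rotating each (reversed) edge vector by 90 degrees: (dx, dz) becomes (dz, -dx). A compact standalone check on a unit square with counter-clockwise vertices, where the resulting normals point into the room:

import numpy as np

room_meta = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
wallSecIndices = np.arange(1, len(room_meta)).tolist() + [0]
rv = room_meta - room_meta[wallSecIndices]  # reversed edge vectors
normals = rv[:, [1, 0]]
normals[:, 1] = -normals[:, 1]              # (dx, dz) -> (dz, -dx)
print(normals)  # [[0, 1], [-1, 0], [0, -1], [1, 0]]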
Example #10
def fa_layout_pro(rj):
    pend_obj_list = []
    final_obj_list = []
    bbindex = []
    ol = rj['objList']
    print("Total Number of objects: ", len(ol))
    for o in ol:
        if o is None:
            continue
        if o['modelId'] not in obj_semantic:
            final_obj_list.append(o)
            continue
        if 'coarseSemantic' in o:
            if o['coarseSemantic'] in BANNED:
                final_obj_list.append(o)
                continue
        bbindex.append(name_to_ls[o['modelId']])
        pend_obj_list.append(o)
    diag_indices_ = (torch.arange(len(pend_obj_list)),
                     torch.arange(len(pend_obj_list)))
    csrrelation = csrmatrix[bbindex][:, bbindex]
    yrelation = ymatrix[bbindex][:, bbindex]
    wallrelation = wallvector[bbindex]
    cornerrelation = cornervector[bbindex]
    csrrelation[diag_indices_] = 0.0
    SSIZE = 1000
    rng = np.random.default_rng()
    for centerid in range(len(pend_obj_list)):
        center = pend_obj_list[centerid]
        for objid in range(len(pend_obj_list)):
            obj = pend_obj_list[objid]
            priorid = "{}-{}".format(center['modelId'], obj['modelId'])
            if priorid not in priors['pos']:
                with open(PRIORS.format(center['modelId'])) as f:
                    if csrrelation[centerid, objid] == 0.0:
                        theprior = np.zeros((SSIZE, 4), dtype=np.float64)
                    else:
                        theprior = np.array(json.load(f)[obj['modelId']],
                                            dtype=np.float64)
                    if len(theprior) == 0:
                        theprior = np.zeros((SSIZE, 4), dtype=np.float64)
                    while len(theprior) < SSIZE:
                        theprior = np.vstack((theprior, theprior))
                    rng.shuffle(theprior)
                    priors['pos'][priorid] = theprior[:, 0:3]
                    priors['ori'][priorid] = theprior[:, 3].flatten()
                SSIZE = np.min((len(priors['pos'][priorid]), SSIZE))
                priors['pos'][priorid] = torch.from_numpy(
                    priors['pos'][priorid]).float()
                priors['ori'][priorid] = torch.from_numpy(
                    priors['ori'][priorid]).float()
            else:
                SSIZE = np.min((len(priors['pos'][priorid]), SSIZE))
    pos_priors = torch.zeros(len(pend_obj_list), len(pend_obj_list), SSIZE, 3)
    ori_priors = torch.zeros(len(pend_obj_list), len(pend_obj_list), SSIZE)
    for centerid in range(len(pend_obj_list)):
        center = pend_obj_list[centerid]
        for objid in range(len(pend_obj_list)):
            if objid == centerid:
                continue
            obj = pend_obj_list[objid]
            priorid = "{}-{}".format(center['modelId'], obj['modelId'])
            pos_priors[centerid, objid] = rotate_pos_prior(
                priors['pos'][priorid][0:SSIZE],
                torch.tensor(center['orient'], dtype=torch.float))
            ori_priors[centerid, objid] = priors['ori'][priorid][0:SSIZE]
    # making sure that angles are between (-pi, pi)
    ori_priors[ori_priors > np.pi] -= 2 * np.pi
    ori_priors[ori_priors < -np.pi] += 2 * np.pi
    room_meta = p2d(
        '.', '/suncg/room/{}/{}f.obj'.format(rj['origin'], rj['modelId']))
    room_polygon = Polygon(room_meta[:, 0:2])
    room_shape = torch.from_numpy(room_meta[:, 0:2]).float()
    room_shape_norm = torch.from_numpy(room_meta).float()
    translate = torch.zeros((len(pend_obj_list), 2)).float()
    orient = torch.zeros((len(pend_obj_list))).float()
    scale = torch.zeros((len(pend_obj_list), 3)).float()
    # for o in pend_obj_list:
    #     disturbance(o, 0.5, room_polygon)
    for i in range(len(pend_obj_list)):
        translate[i][0] = pend_obj_list[i]['translate'][0]
        translate[i][1] = pend_obj_list[i]['translate'][2]
        orient[i] = pend_obj_list[i]['orient']
        scale[i][0] = pend_obj_list[i]['scale'][0]
        scale[i][1] = pend_obj_list[i]['scale'][1]
        scale[i][2] = pend_obj_list[i]['scale'][2]

    bb = four_points_xz[bbindex].float()
    for i in range(len(pend_obj_list)):
        bb[i] = rotate_bb_local_para(bb[i], orient[i], scale[i][[0, 2]])

    translate.requires_grad_()
    orient.requires_grad_()
    iteration = 0
    # loss = distribution_loss(translate, pos_priors[:, :, :, [0, 2]], csrrelation)
    start_time = time.time()
    loss = distribution_loss_orient(translate, orient, pos_priors[:, :, :,
                                                                  [0, 2]],
                                    ori_priors, csrrelation)
    c_loss = collision_loss(
        translate.reshape(len(pend_obj_list), 1, 2) + bb, room_shape,
        yrelation * (1 - csrrelation), wallrelation, cornerrelation)
    loss += c_loss
    while loss.item() > 0.0 and iteration < MAX_ITERATION:
        print("Start iteration {}...".format(iteration))
        loss.backward()
        # translate.data = translate.data - (1.0 / (1 + torch.sum(csrrelation, dim=1))).reshape(len(pend_obj_list), 1) * translate.grad * 0.05
        translate.data = translate.data - translate.grad * 0.05
        translate.grad = None
        # if orient.grad is not None:
        #     orient.data = orient.data - orient.grad * 0.01
        #     orient.data[orient.data >  np.pi] -= 2 * np.pi
        #     orient.data[orient.data < -np.pi] += 2 * np.pi
        #     orient.grad = None
        # loss = distribution_loss(translate, pos_priors[:, :, :, [0, 2]], csrrelation)
        loss = distribution_loss_orient(translate, orient, pos_priors[:, :, :,
                                                                      [0, 2]],
                                        ori_priors, csrrelation)
        c_loss = collision_loss(
            translate.reshape(len(pend_obj_list), 1, 2) + bb, room_shape,
            yrelation * (1 - csrrelation), wallrelation, cornerrelation)
        loss += c_loss
        iteration += 1
    print("--- %s seconds ---" % (time.time() - start_time))
    for i in range(len(pend_obj_list)):
        o = pend_obj_list[i]
        if 'coarseSemantic' not in o:
            break
        print(o['modelId'], o['coarseSemantic'])
        for j in range(len(pend_obj_list)):
            if csrrelation[i][j] == 1.0:
                print("--->>>", pend_obj_list[j]['modelId'],
                      pend_obj_list[j]['coarseSemantic'])
    print(csrrelation)
    for i in range(len(pend_obj_list)):
        o = pend_obj_list[i]
        o['translate'][0] = translate[i][0].item()
        o['translate'][2] = translate[i][1].item()
        o['rotate'][0] = 0.0
        o['rotate'][1] = orient[i].item()
        o['rotate'][2] = 0.0
        o['orient'] = orient[i].item()
    return rj
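The optimization above is plain gradient descent on tensors flagged with requires_grad_, re-evaluating the loss each step and zeroing gradients manually. A self-contained toy with the same loop structure; the quadratic loss is a stand-in for distribution_loss_orient plus collision_loss:

import torch

translate = torch.zeros(2, requires_grad=True)
target = torch.tensor([1.0, -2.0])
loss = torch.sum((translate - target) ** 2)
iteration = 0
while loss.item() > 1e-6 and iteration < 1000:
    loss.backward()
    translate.data = translate.data - translate.grad * 0.05
    translate.grad = None
    loss = torch.sum((translate - target) ** 2)
    iteration += 1
print(iteration, translate.data)  # converges to the target in well under 1000 steps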
Example #11
def hamilton(scene):
    involvedRoomIds = []
    views = []
    # load existing views.
    for fn in os.listdir(f'./latentspace/autoview/{scene["origin"]}'):
        if '.json' not in fn:
            continue
        with open(f'./latentspace/autoview/{scene["origin"]}/{fn}') as f:
            views.append(json.load(f))
    for view in views:
        view['isVisited'] = False
        if view['roomId'] not in involvedRoomIds:
            involvedRoomIds.append(view['roomId'])
    print(involvedRoomIds)
    res = []
    # deciding connections of a floorplan.
    G = nx.Graph()
    for room in scene['rooms']:
        room['isVisited'] = False
        floorMeta = p2d(
            '.', '/dataset/room/{}/{}f.obj'.format(room['origin'],
                                                   room['modelId']))
        try:
            H = sk.getWallHeight(
                f"./dataset/room/{room['origin']}/{room['modelId']}w.obj")
        except Exception:
            continue
        for door in room['objList']:
            if 'coarseSemantic' not in door:
                continue
            if door['coarseSemantic'] not in ['Door', 'door']:
                continue
            if 'roomIds' not in door or len(door['roomIds']) < 2:
                continue
            # if door['roomIds'][0] not in involvedRoomIds and door['roomIds'][1] not in involvedRoomIds:
            #     continue
            x = (door['bbox']['min'][0] + door['bbox']['max'][0]) / 2
            z = (door['bbox']['min'][2] + door['bbox']['max'][2]) / 2
            DIS = np.inf
            for wallIndex in range(floorMeta.shape[0]):
                wallIndexNext = (wallIndex + 1) % floorMeta.shape[0]
                dis = sk.pointToLineDistance(np.array([x, z]),
                                             floorMeta[wallIndex, 0:2],
                                             floorMeta[wallIndexNext, 0:2])
                if dis < DIS:
                    DIS = dis
                    direction = np.array(
                        [floorMeta[wallIndex, 2], 0, floorMeta[wallIndex, 3]])
            translate = np.array([x, H / 2, z])
            G.add_edge(door['roomIds'][0],
                       door['roomIds'][1],
                       translate=translate,
                       direction=direction,
                       directionToRoom=room['roomId'])
    pre = nx.dfs_predecessors(G)
    suc = nx.dfs_successors(G)
    print(pre, suc)
    # decide the s and t which are the start point and end point respectively.
    # ndproom = list(nx.dfs_successors(G).keys())[0]
    # ndproom = views[0]['roomId']
    ndproom = involvedRoomIds[0]
    roomOrder = []
    while ndproom != -1:
        roomOrder.append(ndproom)
        scene['rooms'][ndproom]['isVisited'] = True
        ndproom = hamiltonNextRoom(ndproom, pre, suc, scene)
    for room in scene['rooms']:
        room['isVisited'] = False
    print(roomOrder)

    def subPath(s):
        if s == len(roomOrder) - 1:
            return (True, s)
        state = False
        start = roomOrder[s]
        s += 1
        while s < len(roomOrder) and roomOrder[s] != start:
            if roomOrder[s] in involvedRoomIds and not scene['rooms'][
                    roomOrder[s]]['isVisited']:
                state = True
            s += 1
        return (state, s)

    i = 0
    while i < len(roomOrder):
        state, s = subPath(i)
        if not state:
            roomOrder = roomOrder[0:i + 1] + roomOrder[s + 1:]
            i -= 1
        else:
            scene['rooms'][roomOrder[i]]['isVisited'] = True
        i += 1
    print(roomOrder)
    ndproom = roomOrder[0]
    ndpNext = None # guard against the case where no existing view lies in the first room.
    for view in views:
        if view['roomId'] == ndproom:
            ndpNext = view
    # perform the algorithm of Angluin and Valiant.
    for i in range(1, len(roomOrder) + 1):
        while ndpNext is not None:
            ndp = ndpNext
            res.append(ndp)
            ndp['isVisited'] = True
            ndpNext = hamiltonNext(ndp, views, scene)
        if i == len(roomOrder):
            break
        lastndproom = roomOrder[i - 1]
        ndproom = roomOrder[i]
        edge = G[lastndproom][ndproom]
        # if edge['direction'].dot(edge['translate'] - ndp['probe']) < 0:
        if edge['directionToRoom'] != ndproom:
            edge['direction'] = -edge['direction']
        ndpNext = {
            'roomId': ndproom,
            'probe': edge['translate'],
            'origin': edge['translate'].tolist(),
            'target': (edge['translate'] + edge['direction']).tolist(),
            'direction': edge['direction'].tolist()
        }
    with open(f'./latentspace/autoview/{scene["origin"]}/path', 'w') as f:
        json.dump(res, f, default=sk.jsonDumpsDefault)
    return res
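The floorplan connectivity is an ordinary networkx graph whose edges are doors carrying positional attributes; dfs_predecessors and dfs_successors then provide the traversal maps that hamiltonNextRoom consumes. A toy version of that part with three rooms and two doors (coordinates are made up):

import networkx as nx

G = nx.Graph()
G.add_edge(0, 1, translate=[2.0, 1.3, 0.0])  # door between rooms 0 and 1
G.add_edge(1, 2, translate=[5.0, 1.3, 2.0])  # door between rooms 1 and 2
pre = nx.dfs_predecessors(G)
suc = nx.dfs_successors(G)
print(pre)  # {1: 0, 2: 1}
print(suc)  # {0: [1], 1: [2]}
print(G[0][1]['translate'])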
Example #12
def autoViewOnePointPerspective(room, scene):
    """
    This function tries generate all potential views w.r.t the One-Point Perspective Rule (OPP Rule). 
    Note that several variants exist w.r.t different rules. 
    """
    theta = (np.pi * scene['PerspectiveCamera']['fov'] / 180) / 2
    floorMeta = p2d(
        '.', '/dataset/room/{}/{}f.obj'.format(room['origin'],
                                               room['modelId']))
    floorPoly = Polygon(floorMeta[:, 0:2])
    H = sk.getWallHeight(
        f"./dataset/room/{room['origin']}/{room['modelId']}w.obj")
    # MAXDIAMETER = sk.roomDiameter(floorMeta)
    # find the anchor point and the anchor wall.
    hypotheses = []
    hypotheses += autoViewsRodrigues(room, scene)
    hypotheses += autoViewTwoWallPerspective(room, scene)
    for wallIndex in range(floorMeta.shape[0]):
        # first get the meta from the target wall.
        wallIndexNext = (wallIndex + 1) % floorMeta.shape[0]
        middlePoint = (floorMeta[wallIndex][0:2] +
                       floorMeta[wallIndexNext][0:2]) / 2
        normal = floorMeta[wallIndex][2:4]
        normal3D = np.array([normal[0], 0, normal[1]])
        # construct the probe lineString.
        p1 = middlePoint
        p2 = middlePoint + normal
        # detect wall decorations including windows.
        targetWallNumWindows = 0
        for r in scene['rooms']:
            for obj in r['objList']:
                if isWindowOnWall(obj, floorMeta[wallIndex][0:2],
                                  floorMeta[wallIndexNext][0:2]):
                    targetWallNumWindows += 1
        targetWallWindoorArea = 0.
        for r in scene['rooms']:
            for obj in r['objList']:
                targetWallWindoorArea += calWindoorArea(
                    obj, floorMeta[wallIndex][0:2],
                    floorMeta[wallIndexNext][0:2])
        for wallJndex in range(floorMeta.shape[0]):
            if wallJndex == wallIndex:
                continue
            if np.dot(floorMeta[wallIndex][2:4],
                      floorMeta[wallJndex][2:4]) >= 0:
                continue
            p3 = floorMeta[wallJndex][0:2]
            p4 = floorMeta[(wallJndex + 1) % floorMeta.shape[0]][0:2]
            # generate the probe point.
            p = twoInfLineIntersection(p1, p2, p3, p4)
            if p is None:
                continue
            # 'probe point' is the most important point which is eventually the camera position (origin).
            p = np.array(p)
            probe = np.array([p[0], H / 2, p[1]])

            # first generate the well-aligned hypothesis.
            h = {}
            h['roomId'] = room['roomId']
            h['type'] = 'wellAligned'
            h['probe'] = probe
            h['direction'] = -normal3D
            h['viewLength'] = np.linalg.norm(middlePoint - p, ord=2)
            h['normal'] = normal.copy()
            h['wallIndex'] = wallIndex
            h['wallJndex'] = wallJndex
            h['floorMeta'] = floorMeta
            numSeenObjs(room, h, probe, -normal3D, floorMeta, theta)
            h['targetWallArea'] = H * np.linalg.norm(
                floorMeta[wallIndex][0:2] - floorMeta[wallIndexNext][0:2],
                ord=2)
            h['targetWallNumWindows'] = targetWallNumWindows
            h['targetWallWindoorArea'] = targetWallWindoorArea
            # tarWindoorArea2021(h, scene, floorMeta, theta)
            h['theta'] = theta
            # hypotheses.append(h)

            # then we try following the 'Three-Wall' rule. (Left Side)
            expandPre, expandNxt = expandWallSeg(wallIndex, floorMeta)
            pThW1 = None
            pThW2 = None
            if expandPre is not None and expandNxt is not None:
                pThW1 = twoInfLineIntersection(
                    expandPre, expandPre + floorMeta[wallIndex][2:4], p3, p4)
                pThW2 = twoInfLineIntersection(
                    expandNxt, expandNxt + floorMeta[wallIndex][2:4], p3, p4)
            if pThW1 is not None and pThW2 is not None:
                pThW1, pThW2 = np.array(pThW1), np.array(pThW2)
                thw = h.copy()
                thw['type'] = 'threeWall'
                thw['pThW1'] = pThW1
                thw['pThW2'] = pThW2
                thw['probe'] = pThW1 + (pThW2 - pThW1) / 3
                thw['probe'] = np.array(
                    [thw['probe'][0], H / 2, thw['probe'][1]])
                # thw['direction'] = np.array([floorMeta[wallIndexNext][0], H/2, floorMeta[wallIndexNext][1]]) - thw['probe']
                # acr = floorMeta[wallIndexNext][0:2] + (floorMeta[wallIndex][0:2] - floorMeta[wallIndexNext][0:2])/3
                acr = expandNxt + (expandPre - expandNxt) / 3
                thw['direction'] = np.array([acr[0], H / 2, acr[1]
                                             ]) - thw['probe']
                thw['direction'] /= np.linalg.norm(thw['direction'])
                thw['direction'] = groundShifting(thw['probe'], floorMeta,
                                                  floorPoly, thw['direction'],
                                                  theta, H)
                thw['viewLength'] = np.linalg.norm(np.array([
                    floorMeta[wallIndexNext][0], H / 2,
                    floorMeta[wallIndexNext][1]
                ]) - thw['probe'],
                                                   ord=2)
                hypotheses.append(thw)

                # then we try following the 'Three-Wall' rule. (Right Side)
                thwR = thw.copy()
                thwR['probe'] = pThW2 + (pThW1 - pThW2) / 3
                thwR['type'] = 'threeWall_R'
                thwR['probe'] = np.array(
                    [thwR['probe'][0], H / 2, thwR['probe'][1]])
                # thwR['direction'] = np.array([floorMeta[wallIndex][0], H/2, floorMeta[wallIndex][1]]) - thwR['probe']
                # acr = floorMeta[wallIndex][0:2] + (floorMeta[wallIndexNext][0:2] - floorMeta[wallIndex][0:2])/3
                acr = expandPre + (expandNxt - expandPre) / 3
                thwR['direction'] = np.array([acr[0], H / 2, acr[1]
                                              ]) - thwR['probe']
                thwR['direction'] /= np.linalg.norm(thwR['direction'])
                thwR['direction'] = groundShifting(thwR['probe'], floorMeta,
                                                   floorPoly,
                                                   thwR['direction'], theta, H)
                thwR['viewLength'] = np.linalg.norm(np.array([
                    floorMeta[wallIndexNext][0], H / 2,
                    floorMeta[wallIndexNext][1]
                ]) - thwR['probe'],
                                                    ord=2)
                hypotheses.append(thwR)

                # then we shift the well-aligned hypothesis to the midpoint of the expanded wall ('wellAlignedShifted').
                mtm = thw.copy()
                mtm['probe'] = (pThW1 + pThW2) / 2
                mtm['type'] = 'wellAlignedShifted'
                mtm['probe'] = np.array(
                    [mtm['probe'][0], H / 2, mtm['probe'][1]])
                # thwR['direction'] = np.array([floorMeta[wallIndex][0], H/2, floorMeta[wallIndex][1]]) - thwR['probe']
                # acr = floorMeta[wallIndex][0:2] + (floorMeta[wallIndexNext][0:2] - floorMeta[wallIndex][0:2])/3
                acr = (expandNxt + expandPre) / 2
                mtm['direction'] = np.array([acr[0], H / 2, acr[1]
                                             ]) - mtm['probe']
                mtm['direction'] /= np.linalg.norm(mtm['direction'])
                mtm['direction'] = groundShifting(mtm['probe'], floorMeta,
                                                  floorPoly, mtm['direction'],
                                                  theta, H)
                mtm['viewLength'] = np.linalg.norm(np.array([
                    floorMeta[wallIndexNext][0], H / 2,
                    floorMeta[wallIndexNext][1]
                ]) - mtm['probe'],
                                                   ord=2)
                hypotheses.append(mtm)
            # the prefix wall and the suffix wall
            pThW1 = twoInfLineIntersection(
                floorMeta[(wallIndex + floorMeta.shape[0] - 1) %
                          floorMeta.shape[0]][0:2], floorMeta[wallIndex][0:2],
                p3, p4)
            pThW2 = twoInfLineIntersection(
                floorMeta[wallIndexNext][0:2],
                floorMeta[(wallIndexNext + 1) % floorMeta.shape[0]][0:2], p3,
                p4)
            if pThW1 is not None and pThW2 is not None:
                pThW1, pThW2 = np.array(pThW1), np.array(pThW2)
                thinL = h.copy()
                thinL['type'] = 'threeWall_thin'
                thinL['pThW1'] = pThW1
                thinL['pThW2'] = pThW2
                thinL['probe'] = pThW1 + (pThW2 - pThW1) / 3
                thinL['probe'] = np.array(
                    [thinL['probe'][0], H / 2, thinL['probe'][1]])
                acr = floorMeta[wallIndexNext][0:2] + (
                    floorMeta[wallIndex][0:2] -
                    floorMeta[wallIndexNext][0:2]) / 3
                thinL['direction'] = np.array([acr[0], H / 2, acr[1]
                                               ]) - thinL['probe']
                thinL['direction'] /= np.linalg.norm(thinL['direction'])
                thinL['direction'] = groundShifting(thinL['probe'], floorMeta,
                                                    floorPoly,
                                                    thinL['direction'], theta,
                                                    H)
                thinL['viewLength'] = np.linalg.norm(np.array([
                    floorMeta[wallIndexNext][0], H / 2,
                    floorMeta[wallIndexNext][1]
                ]) - thinL['probe'],
                                                     ord=2)
                hypotheses.append(thinL)

                # then we try following the 'Three-Wall' rule. (Right Side)
                thinR = thw.copy()
                thinR['probe'] = pThW2 + (pThW1 - pThW2) / 3
                thinR['type'] = 'threeWall_R_thin'
                thinR['probe'] = np.array(
                    [thinR['probe'][0], H / 2, thinR['probe'][1]])
                acr = floorMeta[wallIndex][0:2] + (
                    floorMeta[wallIndexNext][0:2] -
                    floorMeta[wallIndex][0:2]) / 3
                thinR['direction'] = np.array([acr[0], H / 2, acr[1]
                                               ]) - thinR['probe']
                thinR['direction'] /= np.linalg.norm(thinR['direction'])
                thinR['direction'] = groundShifting(thinR['probe'], floorMeta,
                                                    floorPoly,
                                                    thinR['direction'], theta,
                                                    H)
                thinR['viewLength'] = np.linalg.norm(np.array([
                    floorMeta[wallIndexNext][0], H / 2,
                    floorMeta[wallIndexNext][1]
                ]) - thinR['probe'],
                                                     ord=2)
                hypotheses.append(thinR)
    hypotheses = redundancyRemove(hypotheses)
    for h in hypotheses:
        h['roomTypes'] = room['roomTypes']
        h['isObjCovered'] = isObjCovered(h, scene)
        theLawOfTheThird(h, room, theta, ASPECT)
        numSeenObjs(room, h, h['probe'], h['direction'], floorMeta, theta)
        tarWindoorArea2021(h, scene, floorMeta, theta)
        layoutConstraint(h, room, theta)
        wallNormalOffset(h, floorMeta)
        toOriginAndTarget(h)
    hypotheses.sort(key=probabilityOPP, reverse=True)
    for rank, h in zip(range(0, len(hypotheses)), hypotheses):
        h['rank'] = rank
    bestViews = {
        'wellAlignedShifted': None,
        'threeWall_R': None,
        'threeWall': None,
        'againstMidWall': None,
        'twoWallPerspective': None
    }
    for h in hypotheses:
        for viewTps in bestViews:
            if viewTps != h['type']:
                continue
            if bestViews[viewTps] is None:
                bestViews[viewTps] = toOriginAndTarget(h)
    # bestViews = []
    # numOfChosen = min(3, len(hypotheses))
    # for index in range(0, numOfChosen):
    #     h = hypotheses[index]
    #     bestViews.append(toOriginAndTarget(h))
    return hypotheses
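twoInfLineIntersection is used throughout but defined elsewhere; a plausible implementation intersecting the infinite lines through (p1, p2) and (p3, p4) and returning None when they are parallel, with the signature inferred from the calls above:

import numpy as np

def twoInfLineIntersection(p1, p2, p3, p4):
    # solve p1 + t*(p2 - p1) = p3 + s*(p4 - p3) with the 2D cross-product form; 
    d1 = np.asarray(p2, dtype=float) - np.asarray(p1, dtype=float)
    d2 = np.asarray(p4, dtype=float) - np.asarray(p3, dtype=float)
    denom = d1[0] * d2[1] - d1[1] * d2[0]
    if abs(denom) < 1e-9:
        return None  # parallel or degenerate
    diff = np.asarray(p3, dtype=float) - np.asarray(p1, dtype=float)
    t = (diff[0] * d2[1] - diff[1] * d2[0]) / denom
    return (np.asarray(p1, dtype=float) + t * d1).tolist()

print(twoInfLineIntersection([0, 0], [1, 1], [0, 2], [2, 0]))  # [1.0, 1.0]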
Example #13
def autoViewTwoWallPerspective(room, scene):
    fov = scene['PerspectiveCamera']['fov']
    # convert fov/2 to radians.
    theta = (np.pi * fov / 180) / 2
    focal = 1 / np.tan(theta)
    tanPhi = ASPECT / focal
    floorMeta = p2d(
        '.', '/dataset/room/{}/{}f.obj'.format(room['origin'],
                                               room['modelId']))
    floorPoly = Polygon(floorMeta[:, 0:2])
    H = sk.getWallHeight(
        f"./dataset/room/{room['origin']}/{room['modelId']}w.obj")
    pcams = []
    for wallDiagIndex in range(floorMeta.shape[0]):
        pcam = {}
        iPre = (wallDiagIndex + floorMeta.shape[0] - 1) % floorMeta.shape[0]
        iNxt = (wallDiagIndex + 1) % floorMeta.shape[0]
        iPreP = floorMeta[iPre][0:2]
        iNxtP = floorMeta[iNxt][0:2]
        # extend two walls as far as possible:
        preList = []
        nxtList = []
        for i in range(floorMeta.shape[0]):
            if i == iPre or i == wallDiagIndex:
                continue
            p3 = floorMeta[i][0:2]
            p4 = floorMeta[(i + 1) % floorMeta.shape[0]][0:2]
            _p = twoInfLineIntersection(floorMeta[wallDiagIndex][0:2], iPreP,
                                        p3, p4)
            if _p is None:
                continue
            _p = np.array(_p)
            if np.dot(_p - floorMeta[wallDiagIndex][0:2],
                      floorMeta[wallDiagIndex][2:4]) < 0:
                continue
            preList.append(_p)
        for i in range(floorMeta.shape[0]):
            if i == iPre or i == wallDiagIndex:
                continue
            p3 = floorMeta[i][0:2]
            p4 = floorMeta[(i + 1) % floorMeta.shape[0]][0:2]
            _p = twoInfLineIntersection(floorMeta[wallDiagIndex][0:2], iNxtP,
                                        p3, p4)
            if _p is None:
                continue
            _p = np.array(_p)
            if np.dot(_p - floorMeta[wallDiagIndex][0:2],
                      floorMeta[iPre][2:4]) < 0:
                continue
            nxtList.append(_p)
        MAXdis = -1
        for pl in preList:
            for nl in nxtList:
                if checkPtoN(pl, nl, floorMeta):
                    dis = np.linalg.norm(pl - nl)
                    if MAXdis < dis:
                        MAXdis = dis
                        iPreP = pl
                        iNxtP = nl
        direction = iNxtP - iPreP
        # rotate the chord between the two extended wall endpoints by 90 degrees; 
        direction = direction[[1, 0]]
        direction[1] = -direction[1]
        direction /= np.linalg.norm(direction, ord=2)
        probe = (iNxtP + iPreP) / 2
        if np.dot(direction, floorMeta[wallDiagIndex][0:2] - probe) < 0:
            direction = -direction
        dis = np.linalg.norm(probe - iNxtP, ord=2) / tanPhi
        probe = probe - direction * dis
        pcam['viewLength'] = np.linalg.norm(probe -
                                            floorMeta[wallDiagIndex][0:2],
                                            ord=2)
        if not floorPoly.contains(Point(probe[0], probe[1])):
            p1 = probe
            p2 = probe + direction * dis
            _plist = []
            for i in range(floorMeta.shape[0]):
                p3 = floorMeta[i][0:2]
                p4 = floorMeta[(i + 1) % floorMeta.shape[0]][0:2]
                _p = twoInfLineIntersection(p1, p2, p3, p4)
                if _p is None:
                    continue
                if np.dot(direction, np.array(_p) - p2) > 0:
                    continue
                _plist.append(_p)
            if len(_plist) > 0:
                _i = np.argmin(np.linalg.norm(np.array(_plist), axis=1))
                probe = _plist[_i]
        else:
            probe = probe.tolist()
        if not floorPoly.contains(Point(probe[0], probe[1])):
            continue
        probe.insert(1, H / 2)
        probe = np.array(probe)
        direction = direction.tolist()
        direction.insert(1, 0)
        direction = groundShifting(probe, floorMeta, floorPoly,
                                   np.array(direction), theta, H)
        pcam['probe'] = probe
        pcam['direction'] = direction
        pcam['theta'] = theta
        pcam['roomId'] = room['roomId']
        pcam['wallDiagIndex'] = wallDiagIndex
        pcam['type'] = 'twoWallPerspective'
        pcam['floorMeta'] = floorMeta
        pcams.append(pcam)
    return pcams
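The focal/tanPhi computation follows the standard pinhole model, assuming fov is the vertical field of view and ASPECT is width over height: focal = 1/tan(theta) is the image-plane distance for a unit-height frame, so tanPhi = ASPECT/focal is the tangent of the horizontal half-fov. A quick numeric check:

import numpy as np

fov = 90.0
ASPECT = 16 / 9
theta = (np.pi * fov / 180) / 2
focal = 1 / np.tan(theta)  # 1.0 at fov = 90
tanPhi = ASPECT / focal
print(focal, np.degrees(np.arctan(tanPhi)))  # horizontal half-fov is about 60.6 degrees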