Example #1
        out.write(json.dumps(m))
    out.write('\n]\n')


def write_data_template_to_file():
    random.seed(38)
    template = generate_data_template(N)
    with open('./data/day-durations-template.json', 'w') as outfile:
        print_data_template_as_json(template, outfile)


#write_data_template_to_file()

#### Generic functions across all models

raw_training_data = json.load(io.open("./data/day-durations-training.json"))
raw_test_data = json.load(io.open("./data/day-durations-test.json"))


def parse_daylight_s(dl_raw):
    hours, minutes = dl_raw.split(':')
    return (3600 * float(hours)) + (60 * float(minutes))


def parse_year_fraction(date_raw, lng):
    ## TODO use lng
    y, m, d = date_raw.split('-')
    return (datetime.datetime(int(y), int(m), int(d)).timestamp() -
            epoch_2019) / year_duration_s

Example #2
    def write_pcd(self, file_format, filepath, only_save_points=False):
        """ Write a pcd to a to-be-specified file format """
        if file_format == "ply":
            row_array = self.points
            if only_save_points:
                new_array = [tuple(row) for row in row_array.tolist()]
                vertices = numpy.array(new_array,
                                       dtype=[("x", numpy.float64),
                                              ("y", numpy.float64),
                                              ("z", numpy.float64)])
            else:
                if self.has_normals():
                    row_array = numpy.hstack((row_array, self.normals))
                if self.has_curvature():
                    row_array = numpy.hstack((row_array, self.curvature))

                if row_array.shape[1] == 3:
                    new_array = [tuple(row) for row in row_array.tolist()]
                    vertices = numpy.array(new_array,
                                           dtype=[("x", numpy.float64),
                                                  ("y", numpy.float64),
                                                  ("z", numpy.float64)])
                elif row_array.shape[1] == 6:
                    new_array = [tuple(row) for row in row_array.tolist()]
                    vertices = numpy.array(new_array,
                                           dtype=[("x", numpy.float64),
                                                  ("y", numpy.float64),
                                                  ("z", numpy.float64),
                                                  ("nx", numpy.float64),
                                                  ("ny", numpy.float64),
                                                  ("nz", numpy.float64)])
                elif row_array.shape[1] == 7:
                    new_array = [tuple(row) for row in row_array.tolist()]
                    vertices = numpy.array(new_array,
                                           dtype=[("x", numpy.float64),
                                                  ("y", numpy.float64),
                                                  ("z", numpy.float64),
                                                  ("nx", numpy.float64),
                                                  ("ny", numpy.float64),
                                                  ("nz", numpy.float64),
                                                  ("quality", numpy.float64)])
                else:
                    raise Exception(
                        "Unexpected column count: supported layouts are points (3), points + normals (6), or points + normals + curvature (7)!"
                    )

            el1 = PlyElement.describe(vertices, "vertex")
            PlyData([el1], text=True).write(filepath)

        elif file_format == "txt":
            row_array = self.points

            if only_save_points:
                numpy.savetxt(filepath, row_array)
            else:
                if self.has_normals():
                    row_array = numpy.hstack((row_array, self.normals))
                if self.has_curvature():
                    row_array = numpy.hstack((row_array, self.curvature))

                numpy.savetxt(filepath, row_array)

        elif file_format == "yml" or file_format == "yaml":
            # Build one {X, Y, Z} entry per point; calling float() on a whole
            # column would raise a TypeError for clouds with more than one point.
            data_dict = []
            for i in range(self.points.shape[0]):
                data_dict.append({
                    "X": float(self.points[i][0]),
                    "Y": float(self.points[i][1]),
                    "Z": float(self.points[i][2])
                })

            with io.open(filepath, "w", encoding="utf8") as outfile:
                yaml.dump(data_dict,
                          outfile,
                          default_flow_style=False,
                          allow_unicode=True)

        else:
            raise TypeError("Specified file format not supported!")

        return
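    # A hedged usage sketch (the instance and paths are hypothetical): given a
    # point cloud object `pcd` exposing .points, .normals, .curvature and the
    # has_normals()/has_curvature() accessors used above, one could write:
    #     pcd.write_pcd("ply", "./out/cloud.ply")
    #     pcd.write_pcd("txt", "./out/cloud.xyz", only_save_points=True)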
Example #3
    def read_pcd(self, file_path):
        """ Read in a point cloud from to-be-specified file format into numpy array """
        file_name, file_extension = os.path.splitext(file_path)

        if file_extension == ".yml" or file_extension == ".yaml":
            stream = io.open(file_path, "r")
            data_loaded = yaml.safe_load(stream)
            data_numpy = numpy.zeros((len(data_loaded), 3))
            for index in range(len(data_loaded)):
                data_numpy[index] = (data_loaded[index]["X"],
                                     data_loaded[index]["Y"],
                                     data_loaded[index]["Z"])
            self.points = data_numpy

        elif file_extension == ".mat":
            # QUICK HACK
            #~ self.points = scipy.io.loadmat(file_path)["pc"] # inner_sheet.mat
            self.points = scipy.io.loadmat(file_path)["pci"]
            if self.points.shape[1] > 3:
                raise Exception(
                    "Currently only point information can be loaded within this format! Switch to .xyz or .ply format!"
                )

        elif file_extension == ".xyz" or file_extension == ".txt":
            self.points = numpy.loadtxt(file_path)
            # We have xyz files with additional normals and maybe curvature as columns
            if self.points.shape[1] > 3 and self.points.shape[1] < 7:
                self.normals = self.points[:, 3:6]
                self.points = numpy.delete(self.points, [3, 4, 5], axis=1)
            if self.points.shape[1] > 6:
                self.normals = self.points[:, 3:6]
                self.curvature = self.points[:, 6]
                self.curvature = self.curvature.reshape(
                    (self.curvature.shape[0], 1))
                self.points = numpy.delete(self.points, [3, 4, 5, 6], axis=1)

        elif file_extension == ".ply":
            with open(file_path, "rb") as f:
                plydata = PlyData.read(f)

                properties = plydata.elements[0].data.dtype.names
                self.points = numpy.zeros((plydata.elements[0].data.size, 3))
                self.points.T[0] = plydata.elements[0].data["x"][:]
                self.points.T[1] = plydata.elements[0].data["y"][:]
                self.points.T[2] = plydata.elements[0].data["z"][:]
                # We may have more than just point information
                if len(properties) > 3:
                    self.normals = numpy.zeros(
                        (plydata.elements[0].data.size, 3))
                    self.normals.T[0] = plydata.elements[0].data["nx"][:]
                    self.normals.T[1] = plydata.elements[0].data["ny"][:]
                    self.normals.T[2] = plydata.elements[0].data["nz"][:]
                # We may have additional curvature information. Meshlab saves this under "quality"
                if len(properties) > 6:
                    self.curvature = plydata.elements[0].data["quality"]
                    self.curvature = self.curvature.reshape(
                        (self.curvature.shape[0], 1))

        elif file_extension == ".asc" or ".csv":
            with open(file_path) as f:
                data = csv.reader(f, delimiter=" ")
                point_list = []
                normals_list = []
                curvature_list = []
                for row in data:
                    point_list.append(row[0:3])
                    if len(row) > 3:
                        normals_list.append(row[3:6])
                    if len(row) > 6:
                        curvature_list.append(row[6])
                self.points = numpy.array(point_list, dtype=numpy.float64)
                if normals_list:
                    self.normals = numpy.array(normals_list,
                                               dtype=numpy.float64)
                if curvature_list:
                    self.curvature = numpy.array(curvature_list,
                                                 dtype=numpy.float64)

        elif file_extension == ".stl":  # This extension might get cancelled and we use meshlab for big data
            model_mesh = mesh.Mesh.from_file(file_path)
            # model_mesh.vectors yields a triplet of vertices per triangle
            self.normals = model_mesh.normals
            # TODO Clarify this step: which format do we get here? Duplicate
            # points must be removed, because we don't care about triangle
            # information!
            ipdb.set_trace()
            self.points = numpy.vstack(
                (model_mesh.v0, model_mesh.v1, model_mesh.v2))

        else:
            raise Exception(
                "File format not supported. Supported inputs: .yml/.yaml, .mat, .xyz/.txt, .ply, .asc/.csv and .stl point clouds!"
            )

        if self.points is None:
            raise Exception("Loaded file was empty")

        return
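    # Round-trip sketch using the same hypothetical instance `pcd` as above
    # (file names are placeholders):
    #     pcd.read_pcd("./data/scan.ply")   # fills .points (+ .normals/.curvature if present)
    #     pcd.write_pcd("txt", "./data/scan.xyz")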
Example #4
def write_2D_txt(my_list, the_filename):
    # Write each row of the 2-D list on its own line of a text file.
    with io.open(the_filename, 'w') as f:
        f.writelines(str(line) + '\n' for line in my_list)
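# Hypothetical usage: each row of the 2-D list ends up on its own line.
#     write_2D_txt([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], './rows.txt')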
Example #5
def internalToCocoGTDemo(dataType='train2017',
                         dataDir='../..',
                         imgCount=float('inf'),
                         stuffStartId=92,
                         stuffEndId=182,
                         mergeThings=True,
                         indent=None,
                         includeCrowd=False,
                         outputAnnots=True):
    '''
    Converts our internal .mat representation of the ground-truth annotations to COCO format.
    :param dataType: the name of the subset: train201x, val201x, test-dev201x or test201x
    :param dataDir: location of the COCO root folder
    :param imgCount: the number of images to use for the .json file
    :param stuffStartId: id where stuff classes start
    :param stuffEndId: id where stuff classes end
    :param mergeThings: merges all 91 thing classes into a single class 'other' with id 183
    :param indent: number of whitespaces used for JSON indentation
    :param includeCrowd: whether to include 'crowd' thing annotations as 'other' (or void)
    :param outputAnnots: whether to include annotations (for test images we only release ids)
    :return: None
    '''

    # Define paths
    imgCountStr = ('_%d' % imgCount) if imgCount < float('inf') else ''
    annotFolder = '%s/annotations/internal/%s' % (dataDir, dataType)
    annPath = '%s/annotations/instances_%s.json' % (dataDir, dataType)
    if outputAnnots:
        jsonPath = '%s/annotations/stuff_%s%s.json' % (dataDir, dataType,
                                                       imgCountStr)
    else:
        jsonPath = '%s/annotations/stuff_image_info_%s%s.json' % (
            dataDir, dataType, imgCountStr)

    # Check if output file already exists
    if os.path.exists(jsonPath):
        raise Exception('Error: Output file already exists: %s' % jsonPath)

    # Check if input folder exists
    if not os.path.exists(annotFolder):
        raise Exception('Error: Input folder does not exist: %s' % annotFolder)

    # Get images
    imgNames = os.listdir(annotFolder)
    imgNames = [
        imgName[:-4] for imgName in imgNames if imgName.endswith('.mat')
    ]
    imgNames.sort()
    if imgCount < len(imgNames):
        imgNames = imgNames[0:imgCount]
    imgCount = len(imgNames)
    imgIds = [int(imgName) for imgName in imgNames]

    # Load COCO API for things
    cocoGt = COCO(annPath)

    # Init
    # annId must be unique, >=1 and cannot overlap with the detection annotations
    if dataType == 'train2017':
        annIdStart = int(1e7)
    elif dataType == 'val2017':
        annIdStart = int(2e7)
    elif dataType == 'test-dev2017':
        annIdStart = int(3e7)
    elif dataType == 'test2017':
        annIdStart = int(4e7)
    else:
        raise Exception('Error: Unknown dataType %s specified!' % dataType)
    annId = annIdStart
    startTime = time.clock()

    print("Writing JSON metadata...")
    with io.open(jsonPath, 'w', encoding='utf8') as output:
        # Write info
        infodata = {
            'description': 'COCO 2017 Stuff Dataset',
            'url': 'http://cocodataset.org',
            'version': '1.0',
            'year': 2017,
            'contributor':
            'H. Caesar, J. Uijlings, M. Maire, T.-Y. Lin, P. Dollar and V. Ferrari',
            'date_created': '2017-08-31 00:00:00.0'
        }
        infodata = {'info': infodata}
        infoStr = json.dumps(infodata, indent=indent)
        infoStr = infoStr[1:-1] + ',\n'  # Remove brackets and add comma

        # Write images
        imdata = [i for i in cocoGt.dataset['images'] if i['id'] in imgIds]
        imdata = {'images': imdata}
        imStr = json.dumps(imdata, indent=indent)
        imStr = imStr[1:-1] + ',\n'  # Remove brackets and add comma

        # Write licenses
        licdata = {'licenses': cocoGt.dataset['licenses']}
        licStr = json.dumps(licdata, indent=indent)
        licStr = licStr[1:-1] + ',\n'  # Remove brackets and add comma

        # Write categories
        catdata = []
        catdata.extend([{
            'id': 92,
            'name': 'banner',
            'supercategory': 'textile'
        }, {
            'id': 93,
            'name': 'blanket',
            'supercategory': 'textile'
        }, {
            'id': 94,
            'name': 'branch',
            'supercategory': 'plant'
        }, {
            'id': 95,
            'name': 'bridge',
            'supercategory': 'building'
        }, {
            'id': 96,
            'name': 'building-other',
            'supercategory': 'building'
        }, {
            'id': 97,
            'name': 'bush',
            'supercategory': 'plant'
        }, {
            'id': 98,
            'name': 'cabinet',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 99,
            'name': 'cage',
            'supercategory': 'structural'
        }, {
            'id': 100,
            'name': 'cardboard',
            'supercategory': 'raw-material'
        }, {
            'id': 101,
            'name': 'carpet',
            'supercategory': 'floor'
        }, {
            'id': 102,
            'name': 'ceiling-other',
            'supercategory': 'ceiling'
        }, {
            'id': 103,
            'name': 'ceiling-tile',
            'supercategory': 'ceiling'
        }, {
            'id': 104,
            'name': 'cloth',
            'supercategory': 'textile'
        }, {
            'id': 105,
            'name': 'clothes',
            'supercategory': 'textile'
        }, {
            'id': 106,
            'name': 'clouds',
            'supercategory': 'sky'
        }, {
            'id': 107,
            'name': 'counter',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 108,
            'name': 'cupboard',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 109,
            'name': 'curtain',
            'supercategory': 'textile'
        }, {
            'id': 110,
            'name': 'desk-stuff',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 111,
            'name': 'dirt',
            'supercategory': 'ground'
        }, {
            'id': 112,
            'name': 'door-stuff',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 113,
            'name': 'fence',
            'supercategory': 'structural'
        }, {
            'id': 114,
            'name': 'floor-marble',
            'supercategory': 'floor'
        }, {
            'id': 115,
            'name': 'floor-other',
            'supercategory': 'floor'
        }, {
            'id': 116,
            'name': 'floor-stone',
            'supercategory': 'floor'
        }, {
            'id': 117,
            'name': 'floor-tile',
            'supercategory': 'floor'
        }, {
            'id': 118,
            'name': 'floor-wood',
            'supercategory': 'floor'
        }, {
            'id': 119,
            'name': 'flower',
            'supercategory': 'plant'
        }, {
            'id': 120,
            'name': 'fog',
            'supercategory': 'water'
        }, {
            'id': 121,
            'name': 'food-other',
            'supercategory': 'food-stuff'
        }, {
            'id': 122,
            'name': 'fruit',
            'supercategory': 'food-stuff'
        }, {
            'id': 123,
            'name': 'furniture-other',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 124,
            'name': 'grass',
            'supercategory': 'plant'
        }, {
            'id': 125,
            'name': 'gravel',
            'supercategory': 'ground'
        }, {
            'id': 126,
            'name': 'ground-other',
            'supercategory': 'ground'
        }, {
            'id': 127,
            'name': 'hill',
            'supercategory': 'solid'
        }, {
            'id': 128,
            'name': 'house',
            'supercategory': 'building'
        }, {
            'id': 129,
            'name': 'leaves',
            'supercategory': 'plant'
        }, {
            'id': 130,
            'name': 'light',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 131,
            'name': 'mat',
            'supercategory': 'textile'
        }, {
            'id': 132,
            'name': 'metal',
            'supercategory': 'raw-material'
        }, {
            'id': 133,
            'name': 'mirror-stuff',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 134,
            'name': 'moss',
            'supercategory': 'plant'
        }, {
            'id': 135,
            'name': 'mountain',
            'supercategory': 'solid'
        }, {
            'id': 136,
            'name': 'mud',
            'supercategory': 'ground'
        }, {
            'id': 137,
            'name': 'napkin',
            'supercategory': 'textile'
        }, {
            'id': 138,
            'name': 'net',
            'supercategory': 'structural'
        }, {
            'id': 139,
            'name': 'paper',
            'supercategory': 'raw-material'
        }, {
            'id': 140,
            'name': 'pavement',
            'supercategory': 'ground'
        }, {
            'id': 141,
            'name': 'pillow',
            'supercategory': 'textile'
        }, {
            'id': 142,
            'name': 'plant-other',
            'supercategory': 'plant'
        }, {
            'id': 143,
            'name': 'plastic',
            'supercategory': 'raw-material'
        }, {
            'id': 144,
            'name': 'platform',
            'supercategory': 'ground'
        }, {
            'id': 145,
            'name': 'playingfield',
            'supercategory': 'ground'
        }, {
            'id': 146,
            'name': 'railing',
            'supercategory': 'structural'
        }, {
            'id': 147,
            'name': 'railroad',
            'supercategory': 'ground'
        }, {
            'id': 148,
            'name': 'river',
            'supercategory': 'water'
        }, {
            'id': 149,
            'name': 'road',
            'supercategory': 'ground'
        }, {
            'id': 150,
            'name': 'rock',
            'supercategory': 'solid'
        }, {
            'id': 151,
            'name': 'roof',
            'supercategory': 'building'
        }, {
            'id': 152,
            'name': 'rug',
            'supercategory': 'textile'
        }, {
            'id': 153,
            'name': 'salad',
            'supercategory': 'food-stuff'
        }, {
            'id': 154,
            'name': 'sand',
            'supercategory': 'ground'
        }, {
            'id': 155,
            'name': 'sea',
            'supercategory': 'water'
        }, {
            'id': 156,
            'name': 'shelf',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 157,
            'name': 'sky-other',
            'supercategory': 'sky'
        }, {
            'id': 158,
            'name': 'skyscraper',
            'supercategory': 'building'
        }, {
            'id': 159,
            'name': 'snow',
            'supercategory': 'ground'
        }, {
            'id': 160,
            'name': 'solid-other',
            'supercategory': 'solid'
        }, {
            'id': 161,
            'name': 'stairs',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 162,
            'name': 'stone',
            'supercategory': 'solid'
        }, {
            'id': 163,
            'name': 'straw',
            'supercategory': 'plant'
        }, {
            'id': 164,
            'name': 'structural-other',
            'supercategory': 'structural'
        }, {
            'id': 165,
            'name': 'table',
            'supercategory': 'furniture-stuff'
        }, {
            'id': 166,
            'name': 'tent',
            'supercategory': 'building'
        }, {
            'id': 167,
            'name': 'textile-other',
            'supercategory': 'textile'
        }, {
            'id': 168,
            'name': 'towel',
            'supercategory': 'textile'
        }, {
            'id': 169,
            'name': 'tree',
            'supercategory': 'plant'
        }, {
            'id': 170,
            'name': 'vegetable',
            'supercategory': 'food-stuff'
        }, {
            'id': 171,
            'name': 'wall-brick',
            'supercategory': 'wall'
        }, {
            'id': 172,
            'name': 'wall-concrete',
            'supercategory': 'wall'
        }, {
            'id': 173,
            'name': 'wall-other',
            'supercategory': 'wall'
        }, {
            'id': 174,
            'name': 'wall-panel',
            'supercategory': 'wall'
        }, {
            'id': 175,
            'name': 'wall-stone',
            'supercategory': 'wall'
        }, {
            'id': 176,
            'name': 'wall-tile',
            'supercategory': 'wall'
        }, {
            'id': 177,
            'name': 'wall-wood',
            'supercategory': 'wall'
        }, {
            'id': 178,
            'name': 'water-other',
            'supercategory': 'water'
        }, {
            'id': 179,
            'name': 'waterdrops',
            'supercategory': 'water'
        }, {
            'id': 180,
            'name': 'window-blind',
            'supercategory': 'window'
        }, {
            'id': 181,
            'name': 'window-other',
            'supercategory': 'window'
        }, {
            'id': 182,
            'name': 'wood',
            'supercategory': 'solid'
        }])
        if mergeThings:
            catdata.extend([{
                'id': stuffEndId + 1,
                'name': 'other',
                'supercategory': 'other'
            }])
        catdata = {'categories': catdata}
        catStr = json.dumps(catdata, indent=indent)
        catStr = catStr[1:-1]  # Remove brackets

        # Write opening braces, headers and annotation start to disk
        output.write(unicode('{\n' + infoStr + imStr + licStr + catStr))

        # Start annots
        if outputAnnots:
            output.write(unicode(',\n"annotations": \n[\n'))
            for i, imgName in enumerate(imgNames):

                # Write annotations
                imgId = imgIds[i]
                diffTime = time.clock() - startTime
                print "Writing JSON annotation %d of %d (%.1fs): %s..." % (
                    i + 1, imgCount, diffTime, imgName)

                # Read annotation file
                annotPath = os.path.join(annotFolder, imgName)
                matfile = scipy.io.loadmat(annotPath)
                labelMap = matfile['S']
                if not np.all(
                    [i == 0 or i >= stuffStartId
                     for i in np.unique(labelMap)]):
                    raise Exception(
                        'Error: .mat annotation files should not contain thing labels!'
                    )

                # Merge thing classes
                if mergeThings:
                    # Get thing GT
                    labelMapThings = cocoSegmentationToSegmentationMap(
                        cocoGt,
                        imgId,
                        checkUniquePixelLabel=False,
                        includeCrowd=includeCrowd)
                    if labelMap.shape[0] != labelMapThings.shape[0] \
                        or labelMap.shape[1] != labelMapThings.shape[1]:
                        raise Exception(
                            'Error: Stuff segmentation map has different size from thing segmentation map!'
                        )

                    # Set all thing classes to the new 'other' class
                    labelMap[labelMapThings > 0] = stuffEndId + 1

                # Add stuff annotations
                labelsAll = np.unique(labelMap)
                labelsValid = [i for i in labelsAll if i >= stuffStartId]
                for labelId in labelsValid:
                    # Add a comma and line break after each annotation
                    assert annId - annIdStart <= 1e7, 'Error: Annotation ids are not unique!'
                    if annId == annIdStart:
                        annotStr = ''
                    else:
                        annotStr = ',\n'

                    # Create mask and encode it
                    Rs = segmentationToCocoMask(labelMap, labelId)

                    # Create annotation data
                    anndata = {}
                    anndata['id'] = annId
                    anndata['image_id'] = int(imgId)
                    anndata['category_id'] = int(labelId)
                    anndata['segmentation'] = Rs
                    anndata['area'] = float(mask.area(Rs))
                    anndata['bbox'] = mask.toBbox(Rs).tolist()
                    anndata['iscrowd'] = 0

                    # Write JSON
                    annotStr = annotStr + json.dumps(anndata, indent=indent)
                    output.write(unicode(annotStr))

                    # Increment annId
                    annId = annId + 1

            # End annots
            output.write(unicode('\n]'))

        # Global end
        output.write(unicode('\n}'))
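# A hedged invocation sketch (paths and counts are placeholders; dataDir must
# contain annotations/internal/<dataType>/*.mat files as described above):
#     internalToCocoGTDemo(dataType='val2017', dataDir='/data/coco',
#                          imgCount=100, indent=2, outputAnnots=True)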
Example #6
# Get images
imageList = glob.glob(annotFolder + '/*.mat')
imageCount = len(imageList)
imageIds = [int(imageName[-16:-4]) for imageName in imageList]

# Load COCO API
print("Loading COCO annotations...")
with open(annPath) as annFile:
    data = json.load(annFile)

# Init
annId = 0

print("Writing JSON metadata...")
with io.open(jsonPath, 'w', encoding='utf8') as outfile:
    # Global start
    outfile.write(unicode('{\n'))

    # Write info
    infodata = {'description': 'This is the 1.1 release of the COCO-Stuff (10K) dataset.',
                'url': 'https://github.com/nightrome/cocostuff',
                'version': '1.1',
                'year': 2017,
                'contributor': 'H. Caesar, J. Uijlings and V. Ferrari',
                'date_created': '2017-04-06 12:00:00.0'}
    infodata = {'info': infodata}
    str_ = json.dumps(infodata, indent=indent, sort_keys=True, separators=separators, ensure_ascii=ensure_ascii)
    str_ = str_[1:-1] + ',\n'  # Remove brackets and add comma
    outfile.write(unicode(str_))
Example #7
        "name": "scissors"
    }, {
        "supercategory": "indoor",
        "id": 88,
        "name": "teddy bear"
    }, {
        "supercategory": "indoor",
        "id": 89,
        "name": "hair drier"
    }, {
        "supercategory": "indoor",
        "id": 90,
        "name": "toothbrush"
    }]
})

try:
    to_unicode = unicode
except NameError:
    to_unicode = str

# print(json.dumps(json_info, indent=4))
# json.dump(json_info, '/media/user_home1/lcastillo/biologia/intances_minival2014.json')   instances_train2014.json or instances_minival2014.json
with io.open('/media/user_home1/lcastillo/biologia/instances_train2014.json',
             'w',
             encoding='utf8') as outfile:
    # str_ = json.dumps(json_info,
    #                   indent=0, sort_keys=True,
    #                   separators=(',', ': '), ensure_ascii=False)
    str_ = json.dumps(json_info)
    outfile.write(to_unicode(str_))
Example #8
File: io.py Project: nadoss/nems_db
def baphy_parm_read(filepath):
    log.info("Loading {0}".format(filepath))

    f = io.open(filepath, "r")
    s = f.readlines(-1)

    globalparams = {}
    exptparams = {}
    exptevents = {}

    for ts in s:
        sout = baphy_mat2py(ts)
        # print(sout)
        try:
            exec(sout)
        except KeyError:
            ts1 = sout.split('= [')
            ts1 = ts1[0].split(',[')

            s1 = ts1[0].split('[')
            sout1 = "[".join(s1[:-1]) + ' = {}'
            try:
                exec(sout1)
            except:
                s2 = sout1.split('[')
                sout2 = "[".join(s2[:-1]) + ' = {}'
                try:
                    exec(sout2)
                except:
                    s3 = sout2.split('[')
                    sout3 = "[".join(s3[:-1]) + ' = {}'
                    try:
                        exec(sout3)
                    except:
                        s4 = sout3.split('[')
                        sout4 = "[".join(s4[:-1]) + ' = {}'
                        exec(sout4)
                        exec(sout3)
                    exec(sout2)
                exec(sout1)
            exec(sout)
        except NameError:
            log.info("NameError on: {0}".format(sout))
        except:
            log.info("Other error on: {0} to {1}".format(ts,sout))

    # special conversions

    # convert exptevents to a DataFrame:
    t = [exptevents[k] for k in exptevents]
    d = pd.DataFrame(t)
    if 'ClockStartTime' in d.columns:
        exptevents = d.drop(['Rove', 'ClockStartTime'], axis=1)
    elif 'Rove' in d.columns:
        exptevents = d.drop(['Rove'], axis=1)
    else:
        exptevents = d
    # rename columns to NEMS standard epoch names
    exptevents.columns = ['name', 'start', 'end', 'Trial']
    for i in range(len(exptevents)):
        if exptevents.loc[i, 'end'] == []:
            exptevents.loc[i, 'end'] = exptevents.loc[i, 'start']

    if 'ReferenceClass' not in exptparams['TrialObject'][1].keys():
        exptparams['TrialObject'][1]['ReferenceClass'] = \
           exptparams['TrialObject'][1]['ReferenceHandle'][1]['descriptor']
    # CPP special case, deletes added commas
    if exptparams['TrialObject'][1]['ReferenceClass'] == 'ContextProbe':
        tags = exptparams['TrialObject'][1]['ReferenceHandle'][1]['Names']  # gets the list of tags
        tag_map = {oldtag: re.sub(r' , ', r'  ', oldtag)
                   for oldtag in tags}  # strip the ' , ' separators with a regexp, mapping each old tag to its comma-less version
        # places the commaless tags back in place
        exptparams['TrialObject'][1]['ReferenceHandle'][1]['Names'] = list(tag_map.values())
        # extend the tag map, adding the PreStimSilence/Stim/PostStimSilence prefix and the Reference suffix
        epoch_map = dict()
        for sufix, tag in product(['PreStimSilence', 'Stim', 'PostStimSilence'], tags):
            key = '{} , {} , Reference'.format(sufix, tag)
            val = '{} , {} , Reference'.format(sufix, tag_map[tag])
            epoch_map[key] = val
        # replaces exptevents names using the map, i.e. get rid of commas
        exptevents.replace(epoch_map, inplace=True)

    return globalparams, exptparams, exptevents
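# Hypothetical call (the Baphy parameter-file path is a placeholder):
#     globalparams, exptparams, exptevents = baphy_parm_read('/data/site001.m')
#     print(exptevents.head())  # epochs with columns: name, start, end, Trial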
Example #9
        heapq.heappush(self.heap, (prob, complete, prefix))
        if len(self.heap) > self.beam_width:
            heapq.heappop(self.heap)

    def __iter__(self):
        return iter(self.heap)
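

# The fragment above implements fixed-width beam pruning: candidates are
# (probability, completed flag, token prefix) tuples pushed onto a min-heap,
# and once the heap exceeds beam_width the least probable entry is evicted,
# so iterating yields only the current top candidates. A minimal
# self-contained sketch of the same idea (the class name here is an
# assumption, not the original code):
import heapq

class BeamSketch:
    def __init__(self, beam_width):
        self.beam_width = beam_width
        self.heap = []  # min-heap of (prob, complete, prefix) tuples

    def add(self, prob, complete, prefix):
        heapq.heappush(self.heap, (prob, complete, prefix))
        if len(self.heap) > self.beam_width:
            heapq.heappop(self.heap)  # evict the least likely candidate

    def __iter__(self):
        return iter(self.heap)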


##################################################################################################################################

if __name__ == '__main__':

    #with io.open(raw_input_data_dir+'/PreProcOut/refcoco_refrnn.json', 'r', encoding='utf-8') as captions_f:
    #with io.open(raw_input_data_dir+'/PreProcOut/refcoco_refrnn_compositionalspl.json', 'r', encoding='utf-8') as captions_f:
    with io.open('/media/compute/vol/dsg/lilian/testrun/refcoco_refrnn.json',
                 'r',
                 encoding='utf-8') as captions_f:
        captions_data = json.load(captions_f)['images']
    #features = scipy.io.loadmat(raw_input_data_dir+'/visual_genome_vgg19_feats_small.mat')['feats'].T #image features matrix are transposed
    #features = scipy.io.loadmat(raw_input_data_dir+'/ExtrFeatsOut/refcoco_vgg19_rnnpreproc.mat')['feats'].T #image features matrix are transposed
    features = scipy.io.loadmat(
        '/media/compute/vol/dsg/lilian/testrun/refcoco_vgg19_rnnpreproc.mat'
    )['feats'].T

    raw_dataset = {
        'train': {
            'filenames': list(),
            'images': list(),
            'captions': list()
        },
        'val': {  # assumed to mirror the 'train' split above
            'filenames': list(),
            'images': list(),
            'captions': list()
        }
    }