示例#1
0
def update_gridfs_with_synsets(
        synsets,
        fs,
        force=True,
        imagenet_username='******',
        accesskey='bd662acb4866553500f17babd5992810e0b5a439'):
    filenames_dict = cPickle.loads(fs.get('filenames_dict.p').read())
    for i, synset in enumerate(synsets):
        filenames = []
        url = 'http://www.image-net.org/download/synset?' + \
              'wnid=' + str(synset) + \
              '&username='******'&accesskey=' + accesskey + \
              '&release=latest'
        print i
        print url
        url_file = urlopen(url)
        tar_file = tarfile.open(fileobj=url_file, mode='r|')

        for tar_info in tar_file:
            filename = tar_info.name
            not_uploaded = True
            while not_uploaded:
                try:
                    if force:
                        fs.delete(filename)
                        print 'Overwriting ' + filename
                    filenames.append(filename)
                    fs.put(tar_file.extractfile(tar_info), _id=filename)
                    not_uploaded = False
                except IOError:
                    print filename + ' Failed'
        filenames_dict[synset] = filenames
    fs.delete('filenames_dict.p')
    file_obj = open(
        os.path.join(get_data_home(), 'imagenet', 'filenames_dict.p'), 'wb')
    cPickle.dump(filenames_dict, file_obj)
    file_obj.close()
    file_obj = open(
        os.path.join(get_data_home(), 'imagenet', 'filenames_dict.p'), 'rb')
    fs.put(file_obj, _id='filenames_dict.p')
示例#2
0
def update_gridfs_with_synsets(
    synsets, fs, force=True, imagenet_username="******", accesskey="bd662acb4866553500f17babd5992810e0b5a439"
):
    filenames_dict = cPickle.loads(fs.get("filenames_dict.p").read())
    for i, synset in enumerate(synsets):
        filenames = []
        url = (
            "http://www.image-net.org/download/synset?"
            + "wnid="
            + str(synset)
            + "&username="******"&accesskey="
            + accesskey
            + "&release=latest"
        )
        print i
        print url
        url_file = urlopen(url)
        tar_file = tarfile.open(fileobj=url_file, mode="r|")

        for tar_info in tar_file:
            filename = tar_info.name
            not_uploaded = True
            while not_uploaded:
                try:
                    if force:
                        fs.delete(filename)
                        print "Overwriting " + filename
                    filenames.append(filename)
                    fs.put(tar_file.extractfile(tar_info), _id=filename)
                    not_uploaded = False
                except IOError:
                    print filename + " Failed"
        filenames_dict[synset] = filenames
    fs.delete("filenames_dict.p")
    file_obj = open(os.path.join(get_data_home(), "imagenet", "filenames_dict.p"), "wb")
    cPickle.dump(filenames_dict, file_obj)
    file_obj.close()
    file_obj = open(os.path.join(get_data_home(), "imagenet", "filenames_dict.p"), "rb")
    fs.put(file_obj, _id="filenames_dict.p")
示例#3
0
 def home(self, *suffix_paths):
     """Return a path under the 'lfw' data directory for this dataset,
     optionally extended by *suffix_paths*."""
     base = path.join(get_data_home(), 'lfw', self.name)
     return path.join(base, *suffix_paths)
示例#4
0
 def home(self, *suffix_paths):
     """Resolve *suffix_paths* relative to this dataset's home directory."""
     parts = (get_data_home(), self.name) + suffix_paths
     return os.path.join(*parts)
示例#5
0
import json
import os
import urllib

from skdata.data_home import get_data_home

BASE_URL = 'http://50.19.109.25'
MODEL_URL = BASE_URL + ':9999/3dmodels?'
S3_URL = "http://dicarlocox-datasets.s3.amazonaws.com"
s3_resource_bucket = 'genthor-resources'
s3_old_model_bucket = 'dicarlocox-3dmodels-v1'

BASE_NAME = 'genthor'
# Scikits data genthor directory
GENTHOR_PATH = os.path.join(get_data_home(), BASE_NAME)
# Resource root directory
RESOURCE_PATH = os.path.join(GENTHOR_PATH, "resources")
# Cache root directory
CACHE_PATH = os.path.join(GENTHOR_PATH, "cache")
# background root directory
BACKGROUND_PATH = os.path.join(RESOURCE_PATH, "backgrounds")
# .obj model root directory
OBJ_PATH = os.path.join(RESOURCE_PATH, "objs")
# .egg model root directory
EGG_PATH = os.path.join(RESOURCE_PATH, "eggs")
# .bam model root directory
BAM_PATH = os.path.join(RESOURCE_PATH, "bams")
# Human psychophysics data directory
HUMAN_PATH = os.path.join(RESOURCE_PATH, "human_data")
# Texture image directory
TEXTURE_PATH = os.path.join(RESOURCE_PATH, "textures")

def get_canonical_view(m):
示例#6
0
 def home(self, *suffix_paths):
     """Return a path inside the 'vanhateren' data area for this dataset,
     optionally extended by *suffix_paths*."""
     root = os.path.join(get_data_home(), 'vanhateren', self.name)
     return os.path.join(root, *suffix_paths)
示例#7
0
 def imagenet_home(self, *suffix_paths):
     """Return a path under the 'imagenet' data directory, optionally
     extended by *suffix_paths*."""
     root = os.path.join(get_data_home(), 'imagenet')
     return os.path.join(root, *suffix_paths)
示例#8
0
import json
import os
import urllib

from skdata.data_home import get_data_home

S3_URL = "http://dicarlocox-datasets.s3.amazonaws.com"
s3_resource_bucket = 'genthor-resources'
s3_old_model_bucket = 'dicarlocox-3dmodels-v1'

BASE_NAME = 'genthor'
# Scikits data genthor directory
GENTHOR_PATH = os.path.join(get_data_home(), BASE_NAME)
# Resource root directory
RESOURCE_PATH = os.path.join(GENTHOR_PATH, "resources")
# Cache root directory
CACHE_PATH = os.path.join(GENTHOR_PATH, "cache")
# background root directory
BACKGROUND_PATH = os.path.join(RESOURCE_PATH, "backgrounds")
# .obj model root directory
OBJ_PATH = os.path.join(RESOURCE_PATH, "objs")
# .egg model root directory
EGG_PATH = os.path.join(RESOURCE_PATH, "eggs")
# .bam model root directory
BAM_PATH = os.path.join(RESOURCE_PATH, "bams")
# Human psychophysics data directory
HUMAN_PATH = os.path.join(RESOURCE_PATH, "human_data")
# Texture image directory
TEXTURE_PATH = os.path.join(RESOURCE_PATH, "textures")


def splitext2(pth, splitpoint=0):
    """ Better splitext than os.path's (take optional arg that lets
示例#9
0
 def imagenet_home(self, *suffix_paths):
     """Return a path under the "imagenet" data directory, optionally
     extended by *suffix_paths*."""
     root = os.path.join(get_data_home(), "imagenet")
     return os.path.join(root, *suffix_paths)