Example No. 1
def maybe_download_and_extract():
    """
    Download and extract the weather-data if the data-files don't
    already exist in the data_dir.
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
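All of these snippets lean on the same pattern: a module-level data_url and data_dir, plus the download helper module (apparently download.py from the Hvass-Labs TensorFlow tutorials). A minimal, self-contained sketch of that setup follows; the URL below is a hypothetical placeholder, not taken from the example:

import download  # helper module providing maybe_download_and_extract()

data_url = "https://example.com/weather-data.tar.gz"  # hypothetical URL
data_dir = "data/weather/"

def maybe_download_and_extract():
    # Skips the download if the file is already in data_dir, then
    # extracts archives with a known extension such as .zip or .tar.gz.
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)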
Example No. 2
def maybe_download():
    print("Downloading Inception 5h Model ...")

    # The file on the internet is not stored in a compressed format.
    # This function should not extract the file when it does not have
    # a relevant filename extension such as .zip or .tar.gz.
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 3
def maybe_download_and_extract():
    """
    Download and extract the CIFAR-10 data-set if it doesn't already exist
    in data_path (set this variable first to the desired path).
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_path)
Example No. 4
def maybe_download_and_extract():
    """
    Download and extract the CIFAR-10 data-set if it doesn't already exist
    in data_path (set this variable first to the desired path).
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_path)
Example No. 5
def maybe_download_and_extract():
    """
    Download and extract the data-set if it doesn't already exist
    in data_dir (set this variable first to the desired directory).
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 6
def maybe_download_and_extract():
    """
    Download and extract the weather-data if the data-files don't
    already exist in the data_dir.
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 7
def download_data(in_dir, url):
    # Create the directory if it doesn't already exist.
    if not os.path.exists(in_dir):
        os.makedirs(in_dir)

    download.maybe_download_and_extract(url, in_dir)
Example No. 8
def maybe_download_and_extract():
    """
    Download and extract the IMDB Review data-set if it doesn't already exist
    in data_dir (set this variable first to the desired directory).
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 9
def maybe_download_and_extract():
    """
    Download and extract the CIFAR-10 data-set if it doesn't already exist
    in data_path (set this variable first to the desired path).
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_path)
Example No. 10
def download_data(in_dir, url):

    # Create the folder if it doesn't exist
    if not os.path.exists(in_dir):
        os.makedirs(in_dir)

    # Download from the direct link and extract the files
    download.maybe_download_and_extract(url, in_dir)
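On Python 3.2+, the existence check can be folded into os.makedirs with exist_ok=True; a minimal variant of the same function:

import os
import download

def download_data(in_dir, url):
    # exist_ok=True makes the separate os.path.exists() check unnecessary.
    os.makedirs(in_dir, exist_ok=True)
    download.maybe_download_and_extract(url, in_dir)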
Example No. 11
def maybe_download():
    """
    Download the Inception model from the internet if it does not already
    exist in the data_dir. The file is about 50 MB.
    """

    print("Downloading Inception 5h Model ...")
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 12
def maybe_download():
    """
    Download the Inception model from the internet if it does not already
    exist in the data_dir. The file is about 85 MB.
    """

    print("Downloading Inception v3 Model ...")
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 13
def maybe_download():
    """
    Download the Inception model from the internet if it does not already
    exist in the data_dir. The file is about 85 MB.
    """

    print("Downloading Inception v3 Model ...")
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 14
def maybe_download_and_extract(data_path):
    """
    Download and extract the CIFAR-10 data-set if it doesn't already exist
    in the given data_path.
    """

    # URL for the data-set on the internet.
    data_url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"

    download.maybe_download_and_extract(url=data_url, download_dir=data_path)
Example No. 15
def maybe_download():
    """
    Download the VGG16 model from the internet if it does not already
    exist in the data_dir. WARNING! The file is about 550 MB.
    """

    print("Downloading VGG16 Model ...")

    # The file on the internet is not stored in a compressed format.
    # This function should not extract the file when it does not have
    # a relevant filename extension such as .zip or .tar.gz.
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 16
def maybe_download_and_extract():
    """
    Download and extract the COCO data-set if the data-files don't
    already exist in data_dir.
    """

    filenames = [
        "zips/train2017.zip", "zips/val2017.zip",
        "annotations/annotations_trainval2017.zip"
    ]

    for filename in filenames:
        url = data_url + filename
        print("Downloading " + url)
        download.maybe_download_and_extract(url=url, download_dir=data_dir)
Example No. 17
def maybe_download():
    """
    Download the VGG16 model from the internet if it does not already
    exist in the data_dir. WARNING! The file is about 550 MB.
    """

    print("Downloading VGG16 Model ...")

    # The file on the internet is not stored in a compressed format.
    # This function should not extract the file when it does not have
    # a relevant filename extension such as .zip or .tar.gz.
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 18
def maybe_download():
    """
    Download the VGG16 model from the internet if it does not already
    exist in the data_dir. WARNING! The file is about 550 MB.
    """

    print("Downloading VGG16 Model ...")

    # The file on the internet is not stored in a compressed format.
    # This function should not extract the file when it does not have
    # a relevant filename extension such as .zip or .tar.gz.
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 19
def maybe_download_and_extract(language_code="da"):
    """
    Download and extract the Europarl data-set if the data-file doesn't
    already exist in data_dir. The data-set is for translating between
    English and the given language-code (e.g. 'da' for Danish, see the
    list of available language-codes above).
    """

    # Create the full URL for the file with this data-set.
    url = data_url + language_code + "-en.tgz"

    download.maybe_download_and_extract(url=url, download_dir=data_dir)
Example No. 20
def maybe_download_and_extract(language_code="da"):
    """
    Download and extract the Europarl data-set if the data-file doesn't
    already exist in data_dir. The data-set is for translating between
    English and the given language-code (e.g. 'da' for Danish, see the
    list of available language-codes above).
    """

    # Create the full URL for the file with this data-set.
    url = data_url + language_code + "-en.tgz"

    download.maybe_download_and_extract(url=url, download_dir=data_dir)
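The Europarl snippets do not show the base data_url. Assuming the statmt.org v7 mirror (an assumption, not confirmed by the examples), a call for the German-English pair would look like this:

data_url = "http://www.statmt.org/europarl/v7/"  # assumed base URL
data_dir = "data/europarl/"

# Downloads and extracts de-en.tgz for the German-English pair.
maybe_download_and_extract(language_code="de")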
Example No. 21
def maybe_download():
    """
    Download the VGG16 model from the internet if it does not already
    exist in the data_dir. WARNING! The file is about 550 MB.
    """

    print("Downloading VGG16 Model ...")

    # The file on the internet is not stored in a compressed format.
    # This function should not extract the file when it does not have
    # a relevant filename extension such as .zip or .tar.gz.
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 22
def downloader(url, path):
    # Download and extract the data-set, then create the output directories.
    download.maybe_download_and_extract(url, path)
    dirCreator(meta, path)

    # Route each batch into the train/validation/test split.
    for batch in batches:
        print(batch)
        sep = batchLoader(batch)
        if batch == validation:
            sepSaver(sep, 'validation', batch)
        elif batch == test:
            sepSaver(sep, 'test', batch)
        else:
            sepSaver(sep, 'train', batch)
Example No. 23
def maybe_download_and_extract():
    """
    Download and extract the COCO data-set if the data-files don't
    already exist in data_dir.
    """

    # Filenames to download from the internet.
    filenames = ["zips/train2017.zip", "zips/val2017.zip",
                 "annotations/annotations_trainval2017.zip"]

    # Download these files.
    for filename in filenames:
        # Create the full URL for the given file.
        url = data_url + filename

        print("Downloading " + url)

        download.maybe_download_and_extract(url=url, download_dir=data_dir)
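These COCO snippets likewise assume module-level constants. Plausible values (an assumption; the actual definitions are not shown above) would be:

# Assumed module-level constants for the COCO snippets.
data_url = "http://images.cocodataset.org/"
data_dir = "data/coco/"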
Example No. 24
acc_list = []
count = 0
retrain = 0
parent_dir = 'assets/'
# lr = 1e-5
lr = 1e-4
crates = {'cov1': 0., 'cov2': 0., 'fc1': 0., 'fc2': 0., 'fc3': 0.}

retrain_cnt = 0
roundrobin = 0
with_biases = False

# Download the pretrained AlexNet weights (.npy) if not already present.
url = 'http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/bvlc_alexnet.npy'
download_dir = '.'
download.maybe_download_and_extract(url, download_dir)
INITIAL_TRAIN = True
if INITIAL_TRAIN:
    # TRAIN
    param = [
        ('-cRates', crates),
        ('-first_time', True),
        # ('-first_time', False),
        ('-train', False),
        ('-test', True),
        ('-prune', False),
        # ('-test', True),
        ('-lr', lr),
        ('-with_biases', with_biases),
        ('-parent_dir', parent_dir),
        ('-lambda1', 1e-5),
    ]

Example No. 25
def main(data_set, data_dir='data/', create_csv_lists=True):
    # URLs for all data sets:
    url_iiw = 'http://labelmaterial.s3.amazonaws.com/release/iiw-dataset-release-0.zip'

    url_sintel_complete = 'http://files.is.tue.mpg.de/sintel/MPI-Sintel-complete.zip'

    url_sintel_albedo = 'http://files.is.tue.mpg.de/jwulff/sintel/extras/MPI-Sintel-training_albedo_noshadingtextures.zip'
    url_sintel_images = 'http://files.is.tue.mpg.de/jwulff/sintel/extras/MPI-Sintel-training_clean_noshadingtextures.zip'
    url_sintel_shading = 'http://files.is.tue.mpg.de/jwulff/sintel/extras/MPI-Sintel-training_shading.zip'

    url_mit = 'http://people.csail.mit.edu/rgrosse/intrinsic/intrinsic-data.tar.gz'

    # directory of data
    data_dir_sintel_complete = data_dir + 'mpi-sintel-complete/'
    data_dir_sintel_shading = data_dir + 'mpi-sintel-shading/'
    data_dir_mit = data_dir + 'MIT-intrinsic/data/'
    data_dir_iiw = data_dir + 'iiw-dataset/data/'

    if data_set == 'iiw':
        download.maybe_download_and_extract(url=url_iiw, download_dir=data_dir)

        if create_csv_lists:
            # import file names of data directory:
            df_iiw = pd.DataFrame(
                [[
                    int(os.path.splitext(os.path.basename(x))[0]),
                    os.path.relpath(x, data_dir),
                    os.path.splitext(os.path.relpath(x, data_dir))[0] + '.json'
                ] for x in glob.glob(data_dir_iiw + '/*.png')],
                columns=['file_id', 'image_path', 'label_path'])
            # sort by file ids (we can sort these files because they are
            # shuffled during training in tf anyway):
            df_iiw.sort_values(by='file_id', inplace=True)
            # reset indices of pd.DataFrame:
            df_iiw.reset_index(drop=True, inplace=True)

            # get training, validation and testing data sets of the iiw data:
            df_iiw_train, df_iiw_valid, df_iiw_test, df_iiw_train_sample, \
            df_iiw_valid_sample, \
            df_iiw_test_sample = create_datasets_iiw(df=df_iiw, p_train=0.8,
                                                     data_dir=data_dir,
                                                     p_valid=0.1,
                                                     p_test=0.1,
                                                     sample=True)

            # save complete data set, training data set, validation data set and
            # testing data set in separate data files:
            df_iiw.to_csv(path_or_buf=data_dir + 'data_iiw_complete.csv',
                          sep=',',
                          columns=['image_path', 'label_path'],
                          index=False,
                          header=False)
            df_iiw_train.to_csv(path_or_buf=data_dir + 'data_iiw_train.csv',
                                sep=',',
                                columns=['image_path', 'label_path'],
                                index=False,
                                header=False)
            df_iiw_valid.to_csv(path_or_buf=data_dir + 'data_iiw_valid.csv',
                                sep=',',
                                columns=['image_path', 'label_path'],
                                index=False,
                                header=False)
            df_iiw_test.to_csv(path_or_buf=data_dir + 'data_iiw_test.csv',
                               sep=',',
                               columns=['image_path', 'label_path'],
                               index=False,
                               header=False)

            df_iiw_train_sample.to_csv(path_or_buf=data_dir +
                                       'sample_data_iiw_train.csv',
                                       sep=',',
                                       columns=['image_path', 'label_path'],
                                       index=False,
                                       header=False)
            df_iiw_valid_sample.to_csv(path_or_buf=data_dir +
                                       'sample_data_iiw_valid.csv',
                                       sep=',',
                                       columns=['image_path', 'label_path'],
                                       index=False,
                                       header=False)
            df_iiw_test_sample.to_csv(path_or_buf=data_dir +
                                      'sample_data_iiw_test.csv',
                                      sep=',',
                                      columns=['image_path', 'label_path'],
                                      index=False,
                                      header=False)

    elif data_set == 'mpi_sintel_complete':
        data_dir_sintel_complete = data_dir + 'mpi-sintel-complete/'
        download.maybe_download_and_extract(
            url=url_sintel_complete, download_dir=data_dir_sintel_complete)
        if create_csv_lists:
            # use 'clean pass' images (see narihira2015, p.3: "'final images'
            # [...] are the result of additional computer graphics tricks which
            # distract from our application."):
            df_sintel = pd.DataFrame(
                [[
                    os.path.relpath(x, data_dir),
                    os.path.relpath(x, data_dir).replace('clean', 'albedo'),
                    os.path.relpath(x, data_dir).replace('clean', 'invalid')
                ] for x in glob.glob(data_dir_sintel_complete +
                                     'training/clean/**/*.png')],
                columns=['image_path', 'label_path', 'invalid_path'])
            # add scene to dataframe (for splitting into train/valid/test sets):
            df_sintel['scene_c'] = df_sintel['image_path'].apply(
                lambda row: row.split('/')[-2:-1]).apply(pd.Series)

            # get training, validation and testing data sets of the
            # mpi-sintel data:
            df_sintel_train, df_sintel_valid, df_sintel_test, df_sintel_train_sample, \
                df_sintel_valid_sample, df_sintel_test_sample = create_datasets_sintel(df_sintel)

            # save complete data set, training data set, validation data set and
            # testing data set in separate data files:

            df_sintel.to_csv(
                path_or_buf=data_dir + 'data_sintel_complete_complete.csv',
                sep=',',
                columns=['image_path', 'label_path', 'invalid_path'],
                index=False,
                header=False)
            df_sintel_train.to_csv(
                path_or_buf=data_dir + 'data_sintel_complete_train.csv',
                sep=',',
                columns=['image_path', 'label_path', 'invalid_path'],
                index=False,
                header=False)
            df_sintel_valid.to_csv(
                path_or_buf=data_dir + 'data_sintel_complete_valid.csv',
                sep=',',
                columns=['image_path', 'label_path', 'invalid_path'],
                index=False,
                header=False)
            df_sintel_test.to_csv(
                path_or_buf=data_dir + 'data_sintel_complete_test.csv',
                sep=',',
                columns=['image_path', 'label_path', 'invalid_path'],
                index=False,
                header=False)
            df_sintel_train_sample.to_csv(
                path_or_buf=data_dir + 'sample_data_sintel_complete_train.csv',
                sep=',',
                columns=['image_path', 'label_path', 'invalid_path'],
                index=False,
                header=False)
            df_sintel_valid_sample.to_csv(
                path_or_buf=data_dir + 'sample_data_sintel_complete_valid.csv',
                sep=',',
                columns=['image_path', 'label_path', 'invalid_path'],
                index=False,
                header=False)
            df_sintel_test_sample.to_csv(
                path_or_buf=data_dir + 'sample_data_sintel_complete_test.csv',
                sep=',',
                columns=['image_path', 'label_path', 'invalid_path'],
                index=False,
                header=False)

            # also save (unknown) test files:
            df_sintel_test_unknown = pd.DataFrame(
                [[os.path.relpath(x, data_dir), None, None]
                 for x in glob.glob(data_dir_sintel_complete +
                                    'test/clean/**/*.png')],
                columns=['image_path', 'label_path', 'invalid_path'])

            df_sintel_test_unknown.to_csv(
                path_or_buf=data_dir + 'data_sintel_complete_test_unknown.csv',
                sep=',',
                columns=['image_path', 'label_path', 'invalid_path'],
                index=False,
                header=False)

    elif data_set == 'mpi_sintel_shading':

        # The problem is that the shading files (**/out_*.png)
        # are named differently than the clean/albedo files
        # (**/frame_*.png).
        # Also their numbering does not start with 1, 2, ...
        # Therefore we import each file path (clean, albedo and shading)
        # separately, sort it by scene and frame (increasing) and
        # merge the three paths. Furthermore we have to drop scenes
        # that are not present in all of clean, albedo and shading.

        # maybe download data if necessary:
        download.maybe_download_and_extract(url=url_sintel_images,
                                            download_dir=data_dir +
                                            'mpi-sintel-shading/')
        download.maybe_download_and_extract(url=url_sintel_albedo,
                                            download_dir=data_dir +
                                            'mpi-sintel-shading/')
        download.maybe_download_and_extract(url=url_sintel_shading,
                                            download_dir=data_dir +
                                            'mpi-sintel-shading/')

        if create_csv_lists:
            # import images and labels separately:
            df_clean = pd.DataFrame([
                os.path.relpath(x, data_dir)
                for x in glob.glob(data_dir_sintel_shading +
                                   'clean_noshadingtextures/**/*.png')
            ],
                                    columns=['image_path'])
            df_clean[['scene', 'frame']] = df_clean['image_path'].apply(
                lambda row: row.split('/')[-2:]).apply(pd.Series)
            df_albedo = pd.DataFrame([
                os.path.relpath(x, data_dir)
                for x in glob.glob(data_dir_sintel_shading +
                                   'albedo_noshadingtextures/**/*.png')
            ],
                                     columns=['albedo_label_path'])
            df_albedo[['scene',
                       'frame']] = df_albedo['albedo_label_path'].apply(
                           lambda row: row.split('/')[-2:]).apply(pd.Series)
            df_shading = pd.DataFrame([
                os.path.relpath(x, data_dir)
                for x in glob.glob(data_dir_sintel_shading +
                                   'shading/**/*.png')
            ],
                                      columns=['shading_label_path'])
            df_shading[['scene',
                        'frame']] = df_shading['shading_label_path'].apply(
                            lambda row: row.split('/')[-2:]).apply(pd.Series)
            try:
                df_invalid = pd.DataFrame([
                    os.path.relpath(x, data_dir)
                    for x in glob.glob(data_dir_sintel_complete +
                                       'training/invalid/**/*.png')
                ],
                                          columns=['invalid_path'])
                df_invalid[['scene',
                            'frame']] = df_invalid['invalid_path'].apply(
                                lambda row: row.split('/')[-2:]).apply(
                                    pd.Series)
            except KeyError:
                print('We need to download and extract the ' +
                      'mpi_sintel_complete dataset first to get the invalid ' +
                      'pixel mask.')
                main(data_set='mpi_sintel_complete',
                     data_dir=data_dir,
                     create_csv_lists=False)
                df_invalid = pd.DataFrame([
                    os.path.relpath(x, data_dir)
                    for x in glob.glob(data_dir_sintel_complete +
                                       'training/invalid/**/*.png')
                ],
                                          columns=['invalid_path'])
                df_invalid[['scene',
                            'frame']] = df_invalid['invalid_path'].apply(
                                lambda row: row.split('/')[-2:]).apply(
                                    pd.Series)

            # get list which contains scenes which have to be deleted:
            lst_del = [
                list(df_albedo[~df_albedo['scene'].isin(
                    df_clean['scene'].unique())]['scene'].unique()) +
                list(df_clean[~df_clean['scene'].isin(
                    df_shading['scene'].unique())]['scene'].unique()) +
                list(df_shading[~df_shading['scene'].isin(
                    df_invalid['scene'].unique())]['scene'].unique()) +
                list(df_invalid[~df_invalid['scene'].isin(
                    df_albedo['scene'].unique())]['scene'].unique())
            ][0]

            # delete scenes from lst_del, sort by ('scene', 'frame') and reset
            # index:
            df_clean = df_clean[~df_clean['scene'].isin(lst_del)]
            df_clean = df_clean.sort_values(['scene', 'frame'])
            df_clean.reset_index(drop=True, inplace=True)
            df_albedo = df_albedo[~df_albedo['scene'].isin(lst_del)]
            df_albedo = df_albedo.sort_values(['scene', 'frame'])
            df_albedo.reset_index(drop=True, inplace=True)
            df_shading = df_shading[~df_shading['scene'].isin(lst_del)]
            df_shading = df_shading.sort_values(['scene', 'frame'])
            df_shading.reset_index(drop=True, inplace=True)
            df_invalid = df_invalid[~df_invalid['scene'].isin(lst_del)]
            df_invalid = df_invalid.sort_values(['scene', 'frame'])
            df_invalid.reset_index(drop=True, inplace=True)

            # merge all four DataFrames and keep just important paths:
            df_merged = df_clean.merge(df_albedo,
                                       left_index=True,
                                       right_index=True,
                                       how='inner',
                                       suffixes=('_c', '_a'))
            df_merged = df_merged.merge(df_shading,
                                        left_index=True,
                                        right_index=True,
                                        how='inner',
                                        suffixes=('', '_s'))
            df_sintel2 = df_merged.merge(df_invalid,
                                         left_index=True,
                                         right_index=True,
                                         how='inner',
                                         suffixes=('_s', '_i'))[[
                                             'image_path', 'albedo_label_path',
                                             'shading_label_path',
                                             'invalid_path', 'scene_c'
                                         ]]

            df_sintel_train2, df_sintel_valid2, df_sintel_test2, df_sintel_train_sample2, \
                df_sintel_valid_sample2, df_sintel_test_sample2 = create_datasets_sintel(df_sintel2)

            # save complete data set, training data set, validation data set and
            # testing data set in separate data files:
            df_sintel2.to_csv(
                path_or_buf=data_dir + 'data_sintel_shading_complete.csv',
                sep=',',
                columns=[
                    'image_path', 'albedo_label_path', 'shading_label_path',
                    'invalid_path'
                ],
                index=False,
                header=False)
            df_sintel_train2.to_csv(
                path_or_buf=data_dir + 'data_sintel_shading_train.csv',
                sep=',',
                columns=[
                    'image_path', 'albedo_label_path', 'shading_label_path',
                    'invalid_path'
                ],
                index=False,
                header=False)
            df_sintel_valid2.to_csv(
                path_or_buf=data_dir + 'data_sintel_shading_valid.csv',
                sep=',',
                columns=[
                    'image_path', 'albedo_label_path', 'shading_label_path',
                    'invalid_path'
                ],
                index=False,
                header=False)
            df_sintel_test2.to_csv(
                path_or_buf=data_dir + 'data_sintel_shading_test.csv',
                sep=',',
                columns=[
                    'image_path', 'albedo_label_path', 'shading_label_path',
                    'invalid_path'
                ],
                index=False,
                header=False)
            df_sintel_train_sample2.to_csv(
                path_or_buf=data_dir + 'sample_data_sintel_shading_train.csv',
                sep=',',
                columns=[
                    'image_path', 'albedo_label_path', 'shading_label_path',
                    'invalid_path'
                ],
                index=False,
                header=False)
            df_sintel_valid_sample2.to_csv(
                path_or_buf=data_dir + 'sample_data_sintel_shading_valid.csv',
                sep=',',
                columns=[
                    'image_path', 'albedo_label_path', 'shading_label_path',
                    'invalid_path'
                ],
                index=False,
                header=False)
            df_sintel_test_sample2.to_csv(
                path_or_buf=data_dir + 'sample_data_sintel_shading_test.csv',
                sep=',',
                columns=[
                    'image_path', 'albedo_label_path', 'shading_label_path',
                    'invalid_path'
                ],
                index=False,
                header=False)

    elif data_set == 'mit':
        # maybe download data if necessary:
        download.maybe_download_and_extract(url=url_mit, download_dir=data_dir)

        if create_csv_lists:
            df_mit = pd.DataFrame([[
                os.path.relpath(x, data_dir),
                os.path.relpath(x, data_dir).replace('original',
                                                     'reflectance'),
                os.path.relpath(x, data_dir).replace('original', 'shading')
            ] for x in glob.glob(data_dir_mit + '**/original.png')],
                                  columns=[
                                      'image_path', 'albedo_label_path',
                                      'shading_label_path'
                                  ])

            # get training, validation and testing data sets of the mit data:
            df_mit_train, df_mit_valid, df_mit_test, _, _, \
            _ = create_datasets_mit(df=df_mit, p_train=0.8,
                                    p_valid=0.1, p_test=0.1, sample=False)

            # save complete data set, training data set, validation data set and
            # testing data set in separate data files:
            df_mit.to_csv(path_or_buf=data_dir + 'data_mit_complete.csv',
                          sep=',',
                          columns=[
                              'image_path', 'albedo_label_path',
                              'shading_label_path'
                          ],
                          index=False,
                          header=False)
            df_mit_train.to_csv(path_or_buf=data_dir + 'data_mit_train.csv',
                                sep=',',
                                columns=[
                                    'image_path', 'albedo_label_path',
                                    'shading_label_path'
                                ],
                                index=False,
                                header=False)
            df_mit_valid.to_csv(path_or_buf=data_dir + 'data_mit_valid.csv',
                                sep=',',
                                columns=[
                                    'image_path', 'albedo_label_path',
                                    'shading_label_path'
                                ],
                                index=False,
                                header=False)
            df_mit_test.to_csv(path_or_buf=data_dir + 'data_mit_test.csv',
                               sep=',',
                               columns=[
                                   'image_path', 'albedo_label_path',
                                   'shading_label_path'
                               ],
                               index=False,
                               header=False)
    else:
        raise ValueError("data_set must be in ['iiw', 'mpi_sintel_shading', " +
                         "'mpi_sintel_complete', 'mit']")
Example No. 26
def maybe_download_and_extract(language_code="el"):

    # Create the full URL for the file with this data-set.
    url = data_url + language_code + "-en.tgz"

    download.maybe_download_and_extract(url=url, download_dir=data_dir)
Example No. 27
def maybe_download():
    """
    Download the Inception 5h model from the internet if it does not
    already exist in the data_dir.
    """

    print("Downloading Inception 5h Model ...")
    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 28
def maybe_download_and_extract(language_code="da"):

    url = data_url + language_code + "-en.tgz"

    download.maybe_download_and_extract(url=url, download_dir=data_dir)
Example No. 29
def maybe_download():
    """
    Download the VGG16 model from the internet if it does not already
    exist in the data_dir. WARNING! The file is about 550 MB.
    """

    print("Downloading VGG16 Model ...")

    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 30
def get_mnist_data(url, data_dir):
    print("Downloading {} into {}".format(url, data_dir))
    download.maybe_download_and_extract(url, data_dir)
Example No. 31
def maybe_download_and_extract():
    url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    download.maybe_download_and_extract(url, '.')
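For reference, the CIFAR-10 archive extracts to a cifar-10-batches-py/ directory next to the download; a batch can then be loaded with pickle (a sketch based on the data-set's documented layout):

import pickle

with open('cifar-10-batches-py/data_batch_1', 'rb') as f:
    # Each batch is a dict with keys such as b'data' and b'labels'.
    batch = pickle.load(f, encoding='bytes')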
Example No. 32
def download_data():
    download.maybe_download_and_extract(url=data_url, download_dir=data_path)
    download.maybe_download(url=mat_url, download_dir=data_path)
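Note the split here: maybe_download_and_extract unpacks the downloaded archive, while maybe_download only fetches the .mat file, which is not a compressed archive and should not be extracted.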
Example No. 33
# https://s3.amazonaws.com/cadl/models/synset.txt

# Internet URL for the file with the VGG16 model.
# Note that this might change in the future and will need to be updated.
data_url = "https://s3.amazonaws.com/cadl/models/vgg16.tfmodel"

# Directory to store the downloaded data.
data_dir = "vgg16/"

# File containing the TensorFlow graph definition. (Downloaded)
path_graph_def = "vgg16.tfmodel"

##########################################################################

if not os.path.exists(data_dir):
    os.makedirs(data_dir)


"""
Download the VGG16 model from the internet if it does not already
exist in the data_dir. WARNING! The file is about 550 MB.
"""

print("Downloading VGG16 Model ...")

# The file on the internet is not stored in a compressed format.
# This function should not extract the file when it does not have
# a relevant filename extension such as .zip or .tar.gz.
download.maybe_download_and_extract(url=data_url, download_dir=data_dir)

Example No. 34
def maybe_download(self):
    download.maybe_download_and_extract(url=self.data_url,
                                        download_dir=self.data_dir)
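This method presupposes an object carrying data_url and data_dir attributes. A minimal wrapper class might look like the following sketch (the class name and constructor are assumed, not shown in the original):

import download

class DataSet:
    def __init__(self, data_url, data_dir):
        # Attribute names taken from the method body above.
        self.data_url = data_url
        self.data_dir = data_dir

    def maybe_download(self):
        download.maybe_download_and_extract(url=self.data_url,
                                            download_dir=self.data_dir)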
Example No. 35
def maybe_download_and_extract():
    """
    Download and extract the Knifey-Spoony data-set if it doesn't already
    exist in data_dir (set this variable first to the desired directory).
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
Example No. 36
def maybe_download_and_extract():
    """
    Download and extract the data-set if it doesn't already exist
    in data_path (set this variable first to the desired path).
    """

    download.maybe_download_and_extract(url=data_url, download_dir=data_path)