Example #1
    def __init__(self, mode='train'):
        self.mode = mode
        self.augmentation = None

        # Download dataset
        if not os.path.isdir(os.path.join(DATASET_DIR, mode)):
            print('Downloading CIFAR10 dataset..')
            os.makedirs(DATASET_DIR, exist_ok=True)
            zip_filename = os.path.join(DATASET_DIR, 'tmp.zip')
            download_file_from_google_drive(GDRIVE_HASH, zip_filename)
            # Unzip train and val files
            with zipfile.ZipFile(zip_filename, 'r') as zip_file:
                zip_file.extractall(DATASET_DIR)
                print(f'CIFAR10 dataset downloaded to {DATASET_DIR}.\n')
            # Delete zip file
            os.remove(zip_filename)

        self.filenames = dict()
        self.filenames['image'] = sorted(
            glob(os.path.join(DATASET_DIR, mode, 'image_*.png')))
        self.filenames['label'] = sorted(
            glob(os.path.join(DATASET_DIR, mode, 'label_*.txt')))

        assert len(self.filenames['image']) == len(self.filenames['label']), \
            'Mismatch in the size of input images and labels.'
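Every example on this page calls a download_file_from_google_drive helper without defining it. For reference, here is a minimal sketch of the widely used requests-based pattern; the name and the (file_id, destination) signature match the call sites above and below, but the body is an assumption (the usual confirm-token workaround for Google Drive's virus-scan interstitial), not code taken from any of these repositories.

import requests

def download_file_from_google_drive(file_id, destination, chunk_size=32768):
    """Stream a publicly shared Google Drive file to `destination`."""
    url = 'https://docs.google.com/uc?export=download'
    session = requests.Session()
    response = session.get(url, params={'id': file_id}, stream=True)
    # Large files trigger a virus-scan warning page; Drive returns a
    # confirmation token in a cookie that must be echoed on a second request.
    token = next((v for k, v in response.cookies.items()
                  if k.startswith('download_warning')), None)
    if token:
        response = session.get(url, params={'id': file_id, 'confirm': token},
                               stream=True)
    with open(destination, 'wb') as f:
        for chunk in response.iter_content(chunk_size):
            if chunk:  # skip keep-alive chunks
                f.write(chunk)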
Example #2
def callback(ch, method, properties, body):
    resultado = False
    print(" [x] Received %r" % body)
    task = json.loads(body)
    file_id = task['file_id']
    now = datetime.now()
    ## Check whether the task is a concepts or a vocabulary ETL
    if task['document_type'] == 'concepts':
        path_file = "./received_files/concepts/concepts_{0}_{1}.tsv".format(
            file_id, now.strftime("%Y%m%d%H%M%S"))
        utils.download_file_from_google_drive(file_id, path_file)
        print("executing concepts etl with file: {}".format(path_file))
        resultado = concepts_etl_final.execute(path_file)
    elif task['document_type'] == 'vocabulary':
        path_file = "./received_files/vocabulary/vocabulary_{0}_{1}.csv".format(
            file_id, now.strftime("%Y%m%d%H%M%S"))
        utils.download_file_from_google_drive(file_id, path_file)
        print("executing vocabulary etl with file: {}".format(path_file))
        resultado = vocabulary_etl_final.execute(path_file)
    ch.basic_ack(delivery_tag=method.delivery_tag)
    if resultado:
        # mark the task as successful
        update_status_task(task['uuid'], True)
    else:
        # mark the task as failed
        update_status_task(str(task['uuid']), False)
Example #3
def main():
    data_path = os.path.join(_CURRENT_DIR, "../data")
    file_id = "16IQjiGu-jl2oTqr5wsp9MmJxtQiuyIWq"
    destination = os.path.join(data_path, "bird_dataset.zip")
    if not os.path.isfile(destination) and not os.path.isdir(os.path.join(data_path, "bird_dataset")):
        download_file_from_google_drive(file_id, destination)
        os.system("cd {} && unzip bird_dataset.zip".format(data_path))
Example #4
def main():
    data_path = os.path.join(_CURRENT_DIR, "../saved_models")
    os.system("mkdir -p {}".format(data_path))
    file_id = "1_-FQFU1i79WySBehqdUXAdbI_-RSvFqb"
    destination = os.path.join(data_path, "darknet53.conv.74")
    if not os.path.isfile(destination):
        download_file_from_google_drive(file_id, destination)
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', help="Path to input image")
    parser.add_argument('--style', help="Which style to apply", choices=list(STYLE_IDS.keys()))
    parser.add_argument('--outdir', help="Path to output directory")
    parser.add_argument('--min-image-dim', help="Minimum image dimension", default=1000, type=int)
    parser.add_argument('--num-images', help="Number of images", default=50, type=int)
    parser.add_argument('--shimmer', help="Amount of movement", default=10, type=int)
    args = parser.parse_args()

    print("Loading image")
    img = load_image(args.image, args.min_image_dim)
    print("Downloading model")
    checkpoint_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '%s.ckpt' % args.style)
    download_file_from_google_drive(STYLE_IDS[args.style], checkpoint_path)

    noise1 = args.shimmer * np.random.uniform(size=img.shape) + 177
    noise2 = args.shimmer * np.random.uniform(size=img.shape) + 177

    with imageio.get_writer(os.path.join(args.outdir, 'output.gif'), mode='I') as writer:
        for i in range(args.num_images):
            mult = np.sin(i * 2 * np.pi / args.num_images) / 2 + 0.5
            noise = mult * noise1 + (1 - mult) * noise2
            input_img = img + noise
            print("Transferring style (%d/%d)" % (i+1, args.num_images))
            out = style_transfer(input_img, checkpoint_path)
            writer.append_data(out)
            print("Image saved")
Example #6
    def __init__(self,
                 n_cluster=3,
                 alpha=1,
                 device='cpu',
                 lam=0.1,
                 pre_train=False,
                 max_cycles=None):
        super().__init__()
        self.n_cluster = n_cluster
        self.alpha = alpha
        self.device = device
        self.lam = lam
        self.max_cycles = max_cycles
        if pre_train:
            if not os.path.exists('vgg_normalised_conv5_1.pth'):
                download_file_from_google_drive(
                    '1IAOFF5rDkVei035228Qp35hcTnliyMol',
                    'vgg_normalised_conv5_1.pth')
            if not os.path.exists('decoder_relu4_1.pth'):
                download_file_from_google_drive(
                    '1kkoyNwRup9y5GT1mPbsZ_7WPQO9qB7ZZ', 'decoder_relu4_1.pth')
            self.vgg_encoder = VGGEncoder('vgg_normalised_conv5_1.pth')
            self.decoder = Decoder(4, 'decoder_relu4_1.pth')
        else:
            self.vgg_encoder = VGGEncoder()
            self.decoder = Decoder(4)

        self.multimodal_style_feature_transfer = MultimodalStyleTransfer(
            n_cluster, alpha, device, lam, max_cycles)
Example #7
def main():
    data_path = os.path.join(_CURRENT_DIR, "../data")
    file_id = "1EWEhmvDaYYm0SsydUEGWUDrnBzkLEQc_"
    destination = os.path.join(data_path, "switch_detection.zip")
    if not os.path.isfile(destination) and not os.path.isdir(
            os.path.join(data_path, "switch_detection")):
        download_file_from_google_drive(file_id, destination)
        os.system("cd {} && unzip switch_detection.zip".format(data_path))
Example #8
def main():
    data_path = os.path.join(_CURRENT_DIR, "../data")
    file_id = "1-VMsdeKxOYATf4xC1qrPr_MtkD3SeVkN"
    destination = os.path.join(data_path, "tu_simple.zip")
    if not os.path.isfile(destination) and not os.path.isdir(
            os.path.join(data_path, "tu_simple")):
        download_file_from_google_drive(file_id, destination)
        os.system("cd {} && unzip tu_simple.zip -d {}".format(
            data_path, data_path))
Example #9
def download_word2vec():
    # from https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit
    download_file = 'GoogleNews-vectors-negative300.bin.gz'
    destination = join(WORDVEC_DIR, download_file)
    print("Downloading...")
    utils.download_file_from_google_drive('0B7XkCwpI5KDYNlNUTTlSS21pQmM',
                                          destination)
    unzip_file = 'GoogleNews-vectors-negative300.bin'
    unzip_destination = join(WORDVEC_DIR, unzip_file)

    print("Unzipping...")
    with gzip.open(destination, 'rb') as f_in, open(unzip_destination,
                                                    'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
Example #10
def callback(ch, method, properties, body):
    print(" [x] Received %r" % body)
    task = json.loads(body)
    file_id = task['file_id']
    now = datetime.now()
    path_file = "./received_files/vocabulary/vocabulary_{0}_{1}.csv".format(
        file_id, now.strftime("%Y%m%d%H%M%S"))
    utils.download_file_from_google_drive(file_id, path_file)
    print("executing vocabulary etl with file: {}".format(path_file))
    resultado = vocabulary_etl_final.execute(path_file)
    if resultado:
        # mark the task as successful
        update_status_task(task['uuid'], True)
    else:
        # mark the task as failed
        update_status_task(str(task['uuid']), False)
Example #11
    def download(self, root, remove_zip=True):
        filename = cfg['dataset_dir'] + '.zip'

        if os.path.exists(root):
            return

        file_id = cfg['gdrive_file_id']

        download_file_from_google_drive(file_id, filename)

        with zipfile.ZipFile(filename, 'r') as f:
            f.extractall()

        if remove_zip:
            os.remove(filename)

        shutil.move(cfg['dataset_dir'], root)
Example #12
def get_celeba_tfrec(size):
    data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'data/celeba/')
    zip_data_path = os.path.join(data_path, 'img_align_celeba.zip')
    raw_data_path = os.path.join(data_path, 'img_align_celeba/')
    training_data_path = os.path.join(data_path, 'train_64x64.tfrec')
    test_data_path = os.path.join(data_path, 'test_64x64.tfrec')

    if not os.path.exists(data_path):
        print('data folder doesn\'t exist, creating it')
        Path(data_path).mkdir(parents=True, exist_ok=True)
    if not glob(zip_data_path):
        print('Downloading CelebA dataset')
        download_file_from_google_drive('0B7EVK8r0v71pZjFTYXZWM3FlRnM',
                                        zip_data_path)
    if not glob(raw_data_path):
        print('Extracting CelebA dataset')
        with zipfile.ZipFile(zip_data_path, 'r') as zip_ref:
            zip_ref.extractall('data/celeba/')
    if not glob(training_data_path) or not glob(test_data_path):
        print('Creating CelebA TFrecord')
        create_celeba_tfrec()

    def parse(x):
        result = tf.io.parse_tensor(x, out_type=tf.float32)
        result = tf.reshape(result, [size, size, 3])
        return result

    if size == 128:
        train_dataset = tf.data.TFRecordDataset(
            'data/celeba/train_128x128.tfrec').map(
                parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        test_dataset = tf.data.TFRecordDataset(
            'data/celeba/test_128x128.tfrec').map(
                parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    elif size == 64:
        train_dataset = tf.data.TFRecordDataset(
            'data/celeba/train_64x64.tfrec').map(
                parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        test_dataset = tf.data.TFRecordDataset(
            'data/celeba/test_64x64.tfrec').map(
                parse, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    else:
        raise ValueError('size must be 64 or 128, got {}'.format(size))

    return train_dataset, test_dataset, [-1, size, size, 3]
Example #13
    def __init__(self,
                 alpha=1,
                 device='cpu',
                 use_kmeans_gpu=True,
                 pre_train=False):
        super().__init__()
        self.alpha = alpha
        self.device = device
        self.kmeans_device = device if use_kmeans_gpu else torch.device('cpu')
        if pre_train:
            if not os.path.exists('vgg_normalised_conv5_1.pth'):
                download_file_from_google_drive('1IAOFF5rDkVei035228Qp35hcTnliyMol',
                                                'vgg_normalised_conv5_1.pth')
            if not os.path.exists('decoder_relu4_1.pth'):
                download_file_from_google_drive('1kkoyNwRup9y5GT1mPbsZ_7WPQO9qB7ZZ',
                                                'decoder_relu4_1.pth')
            self.vgg_encoder = VGGEncoder('vgg_normalised_conv5_1.pth').to(device)
            self.decoder = Decoder(4, 'decoder_relu4_1.pth').to(device)
        else:
            self.vgg_encoder = VGGEncoder().to(device)
            self.decoder = Decoder(4).to(device)
Example #14
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--video', help="Path to input video")
    parser.add_argument('--style',
                        help="Which style to apply",
                        choices=list(STYLE_IDS.keys()))
    parser.add_argument('--outdir', help="Path to output directory")
    parser.add_argument('--min-image-dim',
                        help="Minimum image dimension",
                        default=1000,
                        type=int)
    args = parser.parse_args()

    print("Downloading model")
    checkpoint_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   '%s.ckpt' % args.style)
    download_file_from_google_drive(STYLE_IDS[args.style], checkpoint_path)
    print("Transferring style")
    out_path = os.path.join(args.outdir, 'output.avi')
    style_transfer_video(args.video, checkpoint_path, out_path)
    print("Video saved to %s" % out_path)
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', help="Path to input image")
    parser.add_argument('--style', help="Which style to apply", choices=list(STYLE_IDS.keys()))
    parser.add_argument('--outdir', help="Path to output directory")
    parser.add_argument('--min-image-dim', help="Minimum image dimension", default=1000, type=int)
    args = parser.parse_args()

    print("Loading image")
    img = load_image(args.image, args.min_image_dim)
    print("Downloading model")
    checkpoint_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '%s.ckpt' % args.style)
    download_file_from_google_drive(STYLE_IDS[args.style], checkpoint_path)
    print("Transferring style")
    out = style_transfer(img, checkpoint_path)

    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    output_path = os.path.join(args.outdir, 'output.jpg')
    save_image(out, output_path)
    print("Image saved to %s" % output_path)
Example #16
    def _download(self,
                  file_name,
                  url=None,
                  dataset_id=None,
                  file_path=None,
                  use_tqdm=True):
        r"""Download a file from Google Drive.

        Args:
            dataset_id (str): id of the file on Google Drive. A guide to
                obtaining it: https://www.wonderplugin.com/wordpress-tutorials/how-to-apply-for-a-google-drive-api-key/
            use_tqdm (bool): show a tqdm progress bar while downloading.
        """
        raw_dir = os.path.join(self.root_dir, self.dataset_dir, 'raw')
        os.makedirs(raw_dir, exist_ok=True)
        if dataset_id is not None:
            print("Downloading...")
            api_url = ("https://www.googleapis.com/drive/v3/files/" + dataset_id +
                       "?alt=media&key=AIzaSyBEp1hj-WxRxAezSd5sGfPmWnLbuxuxSvI")
            try:
                try:
                    download_file_from_google_drive(dataset_id, raw_dir, use_tqdm)
                except Exception:
                    download_with_url(api_url, raw_dir, file_name, use_tqdm)
            except Exception:
                # Retry once, removing any partial download first.
                if os.path.exists(os.path.join(raw_dir, file_name)):
                    os.remove(os.path.join(raw_dir, file_name))
                try:
                    download_file_from_google_drive(dataset_id, raw_dir, use_tqdm)
                except Exception:
                    if os.path.exists(os.path.join(raw_dir, file_name)):
                        os.remove(os.path.join(raw_dir, file_name))
                    download_with_url(api_url, raw_dir, file_name, use_tqdm)
            print("Downloaded!")
        elif url is not None:
            download_with_url(url, raw_dir, file_name, use_tqdm)
        elif file_path is not None:
            print('Copying data...')
            copy2(file_path, os.path.join(raw_dir, file_name))
            print("Copied!")
        else:
            if not os.path.exists(os.path.join(raw_dir, file_name)):
                raise FileNotFoundError('please download file %s into %s' %
                                        (file_name, raw_dir))
Example #17
import utils
import config
# https://drive.google.com/file/d/1-Fnm3tRx6zedcc-syHfms-yqZypByiIl/view?usp=sharing
if __name__ == "__main__":
    # TAKE ID FROM SHAREABLE LINK
    gdrive_file_id = '1-CdkbAmK_tPVANKSEaE_uMktcjiV44BR'
    # DESTINATION FILE ON YOUR DISK
    destination = config.model_path
    utils.download_file_from_google_drive(gdrive_file_id, destination)
Example #18
if not os.path.exists(Img_img_align_celeba_png):
    os.makedirs(Img_img_align_celeba_png)

if not os.path.exists(Anno):
    os.makedirs(Anno)

if not os.path.exists(Eval):
    os.makedirs(Eval)

# download
for i, (fileid, path) in enumerate(zip(ids, paths)):
    print('{}/{} downloading {}'.format(i + 1, len(ids), path))
    path = os.path.join(root, path)
    if not os.path.exists(path):
        download_file_from_google_drive(fileid, path)

# unzip
try:
    subprocess.call([
        '7z', 'x', '-o' + os.path.relpath(os.path.join(root, 'Img')),
        os.path.join(Img_img_celeba, 'img_celeba.7z.*')
    ])
except:
    print('can\'t unzip img_celeba')

try:
    subprocess.call([
        '7z', 'x', '-o' + os.path.relpath(os.path.join(root, 'Img')),
        os.path.join(Img_img_align_celeba_png, 'img_align_celeba_png.7z.*')
    ])
except:
    print('can\'t unzip img_align_celeba_png')
Example #19
import os
import sys
import subprocess

from utils import download_file_from_google_drive

# url and path
google_drive_id = "19oAw8wWn3Y7z6CKChRdAyGOB9yupL_Xt"

# directory
try:
    root = os.path.join(sys.argv[1], 'jvs_ver1/')
except IndexError:
    root = './jvs_ver1/'

if not os.path.exists(root):
    os.makedirs(root)

# download
download_file_from_google_drive(google_drive_id,
                                os.path.join(root, "jvs_ver1.zip"))

# unzip
try:
    subprocess.call(['unzip', os.path.join(root, 'jvs_ver1.zip'), '-d', root])
except:
    print('can\'t unzip jvs_ver1.zip')
Example #20
        else:
            queries = pd.read_csv(os.path.join('data', 'queries_train.tsv'),
                                  sep='\t')
            logging.info("Successfully loaded queries data.")

        import configuration

        config = configuration.ConfigClass()

        # do we need to download a pretrained model?
        model_url = config.get_model_url()
        if model_url is not None and config.get_download_model():
            import utils

            dest_path = 'model.zip'
            utils.download_file_from_google_drive(model_url, dest_path)
            if not os.path.exists(model_dir):
                os.mkdir(model_dir)
            if os.path.exists(dest_path):
                utils.unzip_file(dest_path, model_dir)
                logging.info(
                    f'Successfully downloaded and extracted pretrained model into {model_dir}.'
                )
            else:
                logging.error('model.zip file does not exist.')

        # test for each search engine module
        engine_modules = [
            'search_engine_' + name for name in ['1', '2', '3', 'best']
        ]
        for engine_module in engine_modules:
Example #21
    # Initialize model
    config = FashionConfig()
    config.display()
    MODEL_DIR = os.path.join(CUR_DIR, "logs")
    model = modellib.MaskRCNN(model_dir=MODEL_DIR, config=config)

    # if torch.cuda.is_available():
    #     model = model.cuda()

    # Download pretrained weights
    coco_pretrained = os.path.join(CUR_DIR, 'mask_rcnn_coco.pth')
    if not os.path.isfile(coco_pretrained):
        print("Downloading Pretrained Model...")
        share_id = '1RhdD8PkR_AQ1-uP3JS-nbcXaTOILhXua'
        utils.download_file_from_google_drive(share_id, coco_pretrained)

    model.load_state_dict(torch.load(coco_pretrained))

    data_path = os.path.join(CUR_DIR, 'images')

    results_path = os.path.join(CUR_DIR, 'results')

    img_list = [f for f in os.listdir(data_path) if '.jpg' in f]

    for img_name in img_list:
        save_name = os.path.join(results_path, img_name.split('.')[0] + '.pth')
        if os.path.isfile(save_name):
            continue
        img_path = os.path.join(data_path, img_name)