Example #1
    def generate_codeforces(self, code, file, out_name):
        m = datamap.DataMap(util.get_file_content(util.get_map(file)))
        self.m = m
        g = Digraph('stones', encoding='utf-8')

        for n in m.nodes:
            if n.is_root:
                count = self.get_module_problem_count(m)
                label = "%s(%s)" % (n.name, str(count))
                # Root node
                g.node(name=n.name, label=label, style='filled', target="_parent", href="https://codeforces.com/problemset", 
                    fontsize='14',
                    fillcolor="orangered", color='lightgrey', fontcolor="white", fontname="Microsoft YaHei", shape='box')
            else:
                # Ordinary module node
                label = "%s(%s)" % (n.name, str(len(n.problems)))
                g.node(name=n.name, label=label, style='filled', fillcolor="lightslategray", color='lightgrey', 
                    fontsize='12',
                    fontcolor="white", fontname="Microsoft YaHei", shape='box')
                g.edge(n.parent, n.name, color=theme.color_arrow)

            # add problem
            last = ""
            for p in n.problems:
                problem = code.get_db_problem(p.id, False)
                if not problem:
                    print("problem not exist "+p.id)
                    continue
                title = problem['name']
                level = code.get_level(problem)

                idstr = str(p.id)
                title = idstr+". "+title
                color = "lightgrey"

                if level == "Easy":
                    color = "greenyellow"
                elif level == "Medium":
                    color = "orange"
                elif level == "Hard":
                    color = "red"
                else:
                    print("unknown level:", level)

                # Problem node
                href = "https://codeforces.com/problemset/problem/%d/%s" % (problem['contestId'], problem['index'])

                g.node(name=idstr, label=title, target="_parent", href=href, 
                        color=color, fontname="Microsoft YaHei", fontsize='12', shape='box')

                if len(last) > 0:
                    g.edge(last, idstr, color=theme.color_arrow)
                else:
                    g.edge(n.name, idstr, color=theme.color_arrow)
                last = idstr

        g.format = 'svg'
        g.render(filename=util.get_images(out_name))
        os.remove(util.get_images(out_name))
        self.post_process(util.get_images(out_name)+".svg")
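
For reference, the traversal above relies on only a small node interface; a minimal stand-in for the datamap structures could look like this (a sketch, with all names inferred from the usage above):

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Problem:
    id: int  # problem identifier; stringified for node names

@dataclass
class Node:
    name: str
    is_root: bool
    problems: List[Problem]
    parent: Optional[str] = None  # parent module name for non-root nodes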
Example #2
    def generate_leetcode(self, leet, file, slug, out_name):
        c = util.get_file_content(util.get_map(file))
        m = datamap.DataMap(c)
        self.m = m
        g = Digraph('stones', encoding='utf-8')

        for n in m.nodes:
            if n.is_root:
                count = self.get_module_problem_count(m)
                label = "%s(%s)" % (n.name, str(count))
                # Root node
                g.node(name=n.name, label=label, style='filled', target="_parent", href="https://leetcode-cn.com/tag/"+slug, 
                    fontsize='14',
                    fillcolor="orangered", color='lightgrey', fontcolor="white", fontname="Microsoft YaHei", shape='box')
            else:
                # Ordinary module node
                label = "%s(%s)" % (n.name, str(len(n.problems)))
                g.node(name=n.name, label=label, style='filled', fillcolor="lightslategray", color='lightgrey', 
                    fontsize='12',
                    fontcolor="white", fontname="Microsoft YaHei", shape='box')
                g.edge(n.parent, n.name, color=theme.color_arrow)

            # add problem
            last = ""
            for p in n.problems:
                title = leet.get_title(p.id)
                level = leet.get_level(p.id)
                problem = leet.get_problem(p.id)
                idstr = str(p.id)
                title = idstr+". "+title
                color = "lightgrey"

                if level == "Easy":
                    color = "greenyellow"
                elif level == "Medium":
                    color = "orange"
                elif level == "Hard":
                    color = "red"
                else:
                    print("unknown level:", level)
                    continue
                problem_slug = problem['data']['question']['questionTitleSlug']

                # Problem node
                g.node(name=idstr, label=title, target="_parent", href="https://leetcode-cn.com/problems/"+problem_slug,
                        color=color, fontname="Microsoft YaHei", fontsize='12', shape='box')

                if len(last) > 0:
                    g.edge(last, idstr, color=theme.color_arrow)
                else:
                    g.edge(n.name, idstr, color=theme.color_arrow)
                last = idstr

        g.format = 'svg'
        g.render(filename=util.get_images(out_name))
        os.remove(util.get_images(out_name))
        self.post_process(util.get_images(out_name)+".svg")
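
Examples #1 and #2 repeat the same Easy/Medium/Hard colour ladder; a dictionary would make the mapping table-driven (an equivalent sketch):

LEVEL_COLORS = {"Easy": "greenyellow", "Medium": "orange", "Hard": "red"}

color = LEVEL_COLORS.get(level, "lightgrey")
if level not in LEVEL_COLORS:
    print("unknown level:", level)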
Example #3
    def dataToSlides(stitch=True):
        t_gen_slides_0 = time.time()
        all_images = get_images()
        all_slides = []
        for imagefile in all_images:
            all_slides.append(
                Slide(imagefile)
            )  # Pixel computations are done here, as the slide is created.
        printl('Total # of non-zero pixels: ' + str(Pixel.total_pixels) +
               ', total number of pixels after filtering: ' +
               str(len(Pixel.all)))
        printl('Total # of blob2ds: ' + str(len(Blob2d.all)))
        printl('Generating ' + str(len(all_slides)) + ' slides took', end='')
        print_elapsed_time(t_gen_slides_0, time.time(), prefix='')
        printl(
            "Pairing all blob2ds with their potential partners in adjacent slides",
            flush=True)
        Slide.set_possible_partners(all_slides)

        if stitch:
            printl('Setting shape contexts for all blob2ds ',
                   flush=True,
                   end="")
            Slide.set_all_shape_contexts(all_slides)
            stitchlist = Pairing.stitchAllBlobs(
                all_slides, debug=False
            )  # TODO change this to work with a list of ids or blob2ds
        else:
            printl(
                '\n-> Skipping stitching the slides, this will result in less accurate blob3ds for the time being'
            )
        blob3dlist = Slide.extract_blob3ds(all_slides, stitched=stitch)
        printl('There are a total of ' + str(len(blob3dlist)) + ' blob3ds')
        return all_slides, blob3dlist  # Returns slides and all their blob3ds in a list
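
Assuming dataToSlides is exposed as a static helper, a caller would consume the returned pair roughly like this (hypothetical usage):

all_slides, blob3dlist = dataToSlides(stitch=True)
print('Slides:', len(all_slides), 'Blob3ds:', len(blob3dlist))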
Example #4
def parse_raw(input) -> tf.train.Example:
    """ Converts a raw Patient proto to a `tf.train.Example`.

    Args:
        input: A raw Patient proto.

    Returns:
        A `tf.train.Example` holding the patient fields and flattened images,
        or None if the required image keys are missing.
    """
    patient, images = get_images(input)
    images = dict(images)

    # Check if example has necessary keys. If not, return None.
    image_keys = get_required_image_keys(list(images.keys()))
    if not image_keys:
        return None

    patient["group"] = calculate_group_from_results(patient["pCR"],
                                                    patient["RCB"])
    for k in patient.keys():
        patient[k] = int64List_feature([patient[k]])

    for k in image_keys:
        images[k.replace("image",
                         "shape")] = int64List_feature(images[k].shape)
        images[k] = int64List_feature(images[k].numpy().flatten().tolist())

    patient.update(images)

    return tf.train.Example(features=tf.train.Features(feature=patient))
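
Records produced this way can be decoded again with tf.io.parse_single_example. The feature spec below follows parse_raw ("group" plus int64 image/shape lists), but the concrete key names are assumptions:

import tensorflow as tf

feature_spec = {
    # One VarLenFeature per int64 list written by parse_raw (keys assumed):
    "group": tf.io.VarLenFeature(tf.int64),
    "image_0": tf.io.VarLenFeature(tf.int64),
    "shape_0": tf.io.VarLenFeature(tf.int64),
}

def decode(serialized):
    return tf.io.parse_single_example(serialized, feature_spec)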
Example #5
def get_filenames(fixed, start, end):
    source = util.format_filename(fixed)
    svgs = util.get_images()
    destinations = sorted([s for s in svgs
            if s >= util.format_filename(start) and 
               s <= util.format_filename(end)])

    return source, destinations
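
util.get_images() here only needs to return SVG filenames that compare against formatted names; a minimal stand-in, assuming a flat images directory:

import glob
import os

def get_images(image_dir="images"):
    # Basenames only, so they compare directly against formatted filenames.
    return sorted(os.path.basename(p)
                  for p in glob.glob(os.path.join(image_dir, "*.svg")))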
Example #6
def main(args: List[str]) -> None:
    """ Attempts to parse all the Images from a list TFRecords and displays
    each Series as an animated image of the 3D stack.

    Args:
        args: `view_data.py` followed by a list of TFRecord paths.
            e.g. ["view_data.py", "data_1.tfrecords", "data_2.tfrecords"]

    Returns:
        None
    """
    for raw in iter(tf.data.TFRecordDataset(args[1:])):
        patient_details, image_stacks = get_images(raw)
        _logger.info(f"Patient details: {patient_details}")
        for name, image in image_stacks:
            visualise_image(name, image)
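
visualise_image is not shown; one plausible implementation animates the stack with matplotlib (a sketch, assuming image is a numpy array of 2D slices):

import matplotlib.pyplot as plt
from matplotlib import animation

def visualise_image(name, image):
    fig = plt.figure()
    fig.suptitle(name)
    # One animation frame per slice along the first axis.
    frames = [[plt.imshow(s, cmap="gray", animated=True)] for s in image]
    anim = animation.ArtistAnimation(fig, frames, interval=100, blit=True)
    plt.show()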
Example #7
def select_folder_sample(root, features, output, test=False, test_num=0, test_out=""):
    """
    This method will select samples from a
    directory-sample structure. This means
    structure where each directory is a sample
    under different conditions (i.e each
    directory is a person's face).

    root - the path of the root folder.
    features - list with numbered features to get.
    """
    
    # Holds the images
    data = []

    # Get all the directories (i.e samples)
    dirs = util.get_directories(root)

    # Now go through every directory and
    # pick the desired features from each folder
    for d in dirs:
        # use numpy to get the partial samples
        images = np.array(util.get_images(d))
        samples = images[features]
        data.extend(list(samples)) # adds to the list
        util.update_line("Getting imgs in folder: %s." % d)

    # Get the testing stuff if chosen
    if test: select_test(data, test_num, test_out)

    # Save all remaining images to the output dir
    print(Fore.GREEN)  # Switch output colour to green
    for i in range(len(data)):
        sample = data[i]
        percent = ((i+1)/len(data))*100
        util.update_line("Copying samples progress: %.1f%%" % percent)
        dst = "/".join([util.abspath(output), os.path.basename(sample)])
        util.copy_image(sample, dst)
    print(Style.RESET_ALL)
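
A hypothetical call that picks images 0, 5 and 9 from every person's folder and sets two of them aside as test samples:

select_folder_sample("faces", [0, 5, 9], "output/train",
                     test=True, test_num=2, test_out="output/test")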
Example #8
    # Init ANSI conversion for Windows.
    init(convert=True)
    print("ANSI conversion initialised.")

    # Change color of usage to red
    __doc__ = Fore.RED + __doc__
    # Use new __doc__ to parse arguments
    arguments = docopt(__doc__, version="1.0")
    # Print full usage if help selected
    if arguments['--help']: print(__doc__)

    # Retrieving location of the files
    img_path = arguments['SAMPLEPATH']
    positive_info_file = arguments['INFOPATH']
    images = get_images(img_path)

    # Gets what function to use as saving info func
    save_func = save_info  # default function uses haar cascade
    if arguments['--function']:
        save_func = eval(arguments['--function'])

    # Finally get the face cascade obj
    if arguments['--cascade']:
        xml_file = arguments['--cascade']
        print(Fore.GREEN)  # Switch output colour to green
        print("Cascade file was provided: %s" % xml_file)
        print(Style.RESET_ALL, flush=True)  # Reset colours and flush
    else:  # Use default
        xml_file = "classifiers/haarcascade_frontalface_default.xml"
        print(Fore.RED)  # Switch output colour to red
Example #9
    "-content",
    type=str,
    default=
    "https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg",
)
parser.add_argument(
    "-style",
    type=str,
    default=
    "https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg",
)
parser.add_argument("-epochs", type=int, default=20)
args = parser.parse_args()

# get images from url
content_path, style_path = util.get_images(args.content, args.style)

# load image
content_image = util.load_img(content_path)
style_image = util.load_img(style_path)

# VGG
content_layers = ["block5_conv2"]
style_layers = [
    "block1_conv1",
    "block2_conv1",
    "block3_conv1",
    "block4_conv1",
    "block5_conv1",
]
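
One plausible implementation of util.get_images for this script downloads both URLs into the local Keras cache and returns the file paths (an assumption):

import tensorflow as tf

def get_images(content_url, style_url):
    content_path = tf.keras.utils.get_file("content.jpg", content_url)
    style_path = tf.keras.utils.get_file("style.jpg", style_url)
    return content_path, style_path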
Example #10
    labels = get_labels()
    data_size = {'train': len(labels), 'test': 10357}

    model = NetworkConstr(weights='imagenet', pooling='avg', include_top=False)

    # Load and process train/test data
    for key, size in data_size.items():
        save_fname = f'{args.network}_avg_features_{key}.npy'
        save_path = os.path.join(args.savedir, save_fname)
        if os.path.exists(save_path):
            print(f'{save_path} already exists, skipping!')
            continue

        print(f'Load {key} data...')
        images = np.zeros((size, input_size, input_size, 3), dtype=K.floatx())
        for i, (img, img_id) in tqdm(enumerate(get_images(key, input_size))):
            x = preprocess_func(np.expand_dims(img, axis=0))
            images[i] = x

        # Run predictions and reshape to get feature vectors
        features = model.predict(images, batch_size=32, verbose=1)
        images = None  # release the image buffer before saving
        features = features.reshape(
            (features.shape[0], np.prod(features.shape[1:])))

        print(f'Saving to {save_path}')
        with open(save_path, 'wb') as f:
            np.save(f, features)

    print('Done!')
    sys.exit(0)
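
Several of these scripts iterate get_images(split, input_size) as (img, img_id) pairs; a stand-in generator consistent with that usage (directory layout and helper names assumed):

import os
from tensorflow.keras.preprocessing import image as keras_image

def get_images(key, input_size, amount=None):
    files = sorted(os.listdir(key))[:amount]
    for fname in files:
        img = keras_image.load_img(os.path.join(key, fname),
                                   target_size=(input_size, input_size))
        yield keras_image.img_to_array(img), os.path.splitext(fname)[0]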
Example #11
    # Print welcome text
    print(Fore.BLUE)
    print("Negative Annotation... Settings passed:")
    print("Background file path: %s" % bg_info_path)
    print("Background images sample %s" % images_path)
    print("Use relative path: %s" % arguments['--relative'])
    print(Style.RESET_ALL)

    # Open/create background file
    open(bg_info_path, "w").close()
    bg_file = open(bg_info_path, "a")
    print("file opened successfully, retrieving imags (might take a while)")

    # Go through all images and save them
    images = get_images(abspath(images_path))
    print("Starting to save images")
    print(Fore.GREEN)
    for i in range(len(images)):

        # Print progress here
        percent = ((i + 1) / len(images)) * 100
        update_line("Progress: %.2f" % percent)

        # Save the info to the file
        save_bg_info(images[i], bg_file, func)
    print(Style.RESET_ALL)

    # Finally close the file
    bg_file.close()
Example #12
parser = argparse.ArgumentParser()
parser.add_argument('-c', default=10, type=int, dest='nclusters')
parser.add_argument('-plot', action='store_true')
args = parser.parse_args()
nclusters = args.nclusters
fout = 'score_' + str(nclusters) + '.pickle'

if os.path.isfile(fout):
    with open(fout, 'rb') as fin:
        (nm, cd) = pickle.load(fin)
else:
    (nm, cd) = util.get_scores(nclusters, 'both', hbins=64, rand=True)
    with open(fout, 'wb') as f:
        pickle.dump((nm, cd), f)

(images, files) = util.get_images(nclusters)
#print [(files[i], files[j]) for i in range(1000) for j in range(1000)
#       if (nm[i, j] > 15 and not util.same_cluster(i, j))]

x = []
y = []
for i in range(nm.shape[0]):
    for j in range(nm.shape[1]):
        if nm[i, j] != 0:
            x.append((nm[i, j], cd[i, j]))
            if util.same_cluster(i, j):
                y.append(1)
            else:
                y.append(-1)
x = np.array(x)
y = np.array(y)
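
The resulting x rows pair the two scores with a +1/-1 same-cluster label, exactly the shape scikit-learn classifiers expect; a hedged continuation:

from sklearn.linear_model import LogisticRegression

clf = LogisticRegression().fit(x, y)
print("training accuracy:", clf.score(x, y))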
Example #13
from tqdm import tqdm
from util import get_labels, get_images
import csv
import numpy as np
# Assumed Keras imports for the load_model and inception_v3 helpers used below:
from tensorflow.keras.models import load_model
from tensorflow.keras.applications import inception_v3

# Define constants
INPUT_SIZE = 299
fname = 'model1_finetune.h5'
nr_predictions = 10357

# Get ids, label names and images
print('Load data...')
labels = get_labels().sort_values(by=['breed']).breed.unique()
ids = []
images = np.zeros((nr_predictions, INPUT_SIZE, INPUT_SIZE, 3), dtype='float16')
for i, (img, img_id) in tqdm(enumerate(get_images('test', INPUT_SIZE))):
    x = inception_v3.preprocess_input(np.expand_dims(img, axis=0))
    images[i] = x
    ids.append(img_id)

# Load model weights
print(f'Load model from {fname}')
model = load_model(fname)

# Make predictions on input images
print('Predict...')
predictions = model.predict(images, verbose=1)

# Save to csv
print('Saving predictions...')
with open('predictions.csv', 'w') as csvfile:
Example #14
USE_GENSEL = False
USE_AUTOENC = False #TODO
USE_ICA = True
USE_CANNY = True
USE_CORNERHARRIS = True
TRAIN = False
CHANNELS = 3

# Load labels
print('Load labels...')
labels = get_labels()[:n_images]

# Load training data
print('Load training data...')
x_train = np.zeros((n_images, INPUT_SIZE, INPUT_SIZE, 3), dtype=K.floatx())
for i, (img, img_id) in tqdm(enumerate(get_images('train', INPUT_SIZE, amount=n_images))):
    x = inception_v3.preprocess_input(np.expand_dims(img, axis=0))
    x_train[i] = x
y_train = one_hot(labels['breed'].values, num_classes=NUM_CLASSES)

# Arguments of ImageDataGenerator define the types of augmentation to perform,
# e.g. horizontal flip, rotation, etc.
# No fitting required since we don't use centering/normalization/whitening.
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=.2,
    height_shift_range=.2,
    horizontal_flip=True,
    validation_split=.1)

# PCA feature extraction:
Example #15
from util import get_images, TRAIN_DIRECTORY, VALIDATION_DIRECTORY, CROP_SIZE

from time import time
from math import floor
from sys import argv

import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import datasets, layers, models

import config_gpu

import matplotlib.pyplot as plt

train_images, train_labels, _ = get_images(TRAIN_DIRECTORY)
val_images, val_labels, _ = get_images(VALIDATION_DIRECTORY)

total_images = train_images.shape[0]

epochs = 10
if len(argv) > 1:
    epochs = int(argv[1])

model = models.Sequential()

# Building the network
model.add(
    layers.Conv2D(32, (3, 3),
                  padding='same',
                  activation='relu',
                  input_shape=(CROP_SIZE, CROP_SIZE, 1)))
Example #16
        data["code"] = code
    update_data(original, changes)

    #for code, data in sorted(original.items()):
        #name = data["name"] if "name" in data else ""
        #print code, name
        #for date, data in reversed(sorted(changes.items())):
            #if code in data and "flag" in data[code]:
                #print "    " + date

    def write_js(var, data):
        return "{0} = {1};".format(var, json.dumps(data, sort_keys=True,
            indent=4, separators=(',', ': ')))

    original_str = write_js("initial_countries", original)
    changes_str = write_js("changes", changes)
    #fills_str = write_js("fills", fills)
    with open("static/js/data/initial.js", 'w') as f:
        f.write(original_str)
    with open("static/js/data/changes.js", 'w') as f:
        f.write(changes_str)
    #with open("static/js/data/fills.js", 'w') as f:
        #f.write(fills_str)



if __name__ == "__main__":
    filenames = list(reversed(util.get_images()))
    convert(filenames[0], filenames[1:])

Example #17
def main():
    # Make directories if they don't already exist
    util.make_directories()
    # Load model options
    model_options = constants.MAIN_MODEL_OPTIONS

    ########## DATA ##########
    if constants.PRINT_MODEL_STATUS: print("Loading data")

    dataset_map = util.load_dataset_map()
    train_captions, val_captions, test_captions = util.load_text_vec(
        'Data', constants.VEC_OUTPUT_FILE_NAME, dataset_map)
    train_image_dict, val_image_dict, test_image_dict = util.get_images(
        'Data', constants.DIRECTORY_PATH, constants.FLOWERS_DICTS_PATH)

    ########## MODEL ##########
    generator = CondBeganGenerator(model_options)
    discriminator = CondBeganDiscriminator(model_options)

    # Put G and D on cuda if GPU available
    if torch.cuda.is_available():
        if constants.PRINT_MODEL_STATUS: print("CUDA is available")
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        if constants.PRINT_MODEL_STATUS: print("Moved models to GPU")

    # Initialize weights
    generator.apply(util.weights_init)
    discriminator.apply(util.weights_init)

    ########## SAVED VARIABLES #########
    new_epoch = 0
    began_k = 0
    train_losses = {"generator": [], "discriminator": [], "converge": []}
    val_losses = {"generator": [], "discriminator": [], "converge": []}
    losses = {'train': train_losses, 'val': val_losses}

    ########## OPTIMIZER ##########
    g_optimizer = optim.Adam(generator.parameters(),
                             lr=constants.LR,
                             betas=constants.BETAS)
    # Changes the optimizer to SGD if declared in constants
    if constants.D_OPTIMIZER_SGD:
        d_optimizer = optim.SGD(discriminator.parameters(), lr=constants.LR)
    else:
        d_optimizer = optim.Adam(discriminator.parameters(),
                                 lr=constants.LR,
                                 betas=constants.BETAS)
    if constants.PRINT_MODEL_STATUS: print("Added optimizers")

    ########## RESUME OPTION ##########
    if args.resume:
        print("Resuming from epoch " + args.resume)
        checkpoint = torch.load(constants.SAVE_PATH + 'weights/epoch' +
                                str(args.resume))
        new_epoch = checkpoint['epoch'] + 1
        generator.load_state_dict(checkpoint['g_dict'])
        discriminator.load_state_dict(checkpoint['d_dict'])
        began_k = checkpoint['began_k']
        g_optimizer.load_state_dict(checkpoint['g_optimizer'])
        d_optimizer.load_state_dict(checkpoint['d_optimizer'])
        losses = torch.load(constants.SAVE_PATH + 'losses')

    ########## VARIABLES ##########
    noise_vec = torch.FloatTensor(constants.BATCH_SIZE, model_options['z_dim'])
    text_vec = torch.FloatTensor(constants.BATCH_SIZE,
                                 model_options['caption_vec_len'])
    real_img = torch.FloatTensor(constants.BATCH_SIZE,
                                 model_options['image_channels'],
                                 constants.IMAGE_SIZE, constants.IMAGE_SIZE)
    real_caption = torch.FloatTensor(constants.BATCH_SIZE,
                                     model_options['caption_vec_len'])
    if constants.USE_CLS:
        wrong_img = torch.FloatTensor(constants.BATCH_SIZE,
                                      model_options['image_channels'],
                                      constants.IMAGE_SIZE,
                                      constants.IMAGE_SIZE)
        wrong_caption = torch.FloatTensor(constants.BATCH_SIZE,
                                          model_options['caption_vec_len'])

    # Add cuda GPU option
    if torch.cuda.is_available():
        noise_vec = noise_vec.cuda()
        text_vec = text_vec.cuda()
        real_img = real_img.cuda()
        real_caption = real_caption.cuda()
        if constants.USE_CLS: wrong_img = wrong_img.cuda()

    ########## Training ##########
    num_iterations = 0
    for epoch in range(new_epoch, constants.NUM_EPOCHS):
        print("Epoch %d" % (epoch))
        st = time.time()

        for i, batch_iter in enumerate(
                util.grouper(train_captions.keys(), constants.BATCH_SIZE)):
            batch_keys = [x for x in batch_iter if x is not None]
            curr_batch_size = len(batch_keys)

            discriminator.train()
            generator.train()
            discriminator.zero_grad()  # Zero out gradient
            # Save computations for gradient calculations
            for p in discriminator.parameters():
                p.requires_grad = True  # re-enable D gradients for the discriminator update

            ########## BATCH DATA #########
            noise_batch = torch.randn(curr_batch_size, model_options['z_dim'])
            text_vec_batch = torch.Tensor(
                util.get_text_description(train_captions, batch_keys))
            real_caption_batch = torch.Tensor(
                util.get_text_description(train_captions, batch_keys))
            real_img_batch = torch.Tensor(
                util.choose_real_image(train_image_dict, batch_keys))
            if constants.USE_CLS:
                wrong_img_batch = torch.Tensor(
                    util.choose_wrong_image(train_image_dict, batch_keys))
            if torch.cuda.is_available():
                noise_batch = noise_batch.cuda()
                text_vec_batch = text_vec_batch.cuda()
                real_caption_batch = real_caption_batch.cuda()
                real_img_batch = real_img_batch.cuda()
                if constants.USE_CLS: wrong_img_batch = wrong_img_batch.cuda()

            # Fill in tensors with batch data
            noise_vec.resize_as_(noise_batch).copy_(noise_batch)
            text_vec.resize_as_(text_vec_batch).copy_(text_vec_batch)
            real_caption.resize_as_(text_vec_batch).copy_(text_vec_batch)
            real_img.resize_as_(real_img_batch).copy_(real_img_batch)
            if constants.USE_CLS:
                wrong_img.resize_as_(wrong_img_batch).copy_(wrong_img_batch)

            ########## RUN THROUGH GAN ##########
            gen_image = generator.forward(Variable(text_vec),
                                          Variable(noise_vec))

            real_img_passed = discriminator.forward(Variable(real_img),
                                                    Variable(real_caption))
            fake_img_passed = discriminator.forward(gen_image.detach(),
                                                    Variable(real_caption))
            if constants.USE_CLS:
                wrong_img_passed = discriminator.forward(
                    Variable(wrong_img), Variable(real_caption))

            ########## TRAIN DISCRIMINATOR ##########
            if constants.USE_REAL_LS:
                # Real loss sensitivity
                # L_D = L(y_r) - k * (L(y_f) + L(y_f, r))
                # L_G = L(y_f) +  L(y_f, r)
                # k = k + lambda_k * (gamma * L(y_r) + L(y_f) +  L(y_f, r))
                d_real_loss = torch.mean(
                    torch.abs(real_img_passed - Variable(real_img)))
                d_fake_loss = torch.mean(torch.abs(fake_img_passed -
                                                   gen_image))
                d_real_sensitivity_loss = torch.mean(
                    torch.abs(fake_img_passed - Variable(real_img)))
                d_loss = d_real_loss - began_k * (
                    0.5 * d_fake_loss + 0.5 * d_real_sensitivity_loss)

                # Update began k value
                balance = (model_options['began_gamma'] * d_real_loss -
                           0.5 * d_fake_loss -
                           0.5 * d_real_sensitivity_loss).data[0]
                began_k = min(
                    max(began_k + model_options['began_lambda_k'] * balance,
                        0), 1)
            elif constants.USE_CLS:
                # Cond BEGAN Discriminator Loss with CLS
                # L(y_w) is the caption loss sensitivity CLS (makes sure that captions match the image)
                # L_D = L(y_r) + L(y_f, w) - k * L(y_f)
                # L_G = L(y_f)
                # k = k + lambda_k * (gamma * (L(y_r) + L(y_f, w)) - L(y_f))
                d_real_loss = torch.mean(
                    torch.abs(real_img_passed - Variable(real_img)))
                d_wrong_loss = torch.mean(
                    torch.abs(fake_img_passed - Variable(wrong_img)))
                d_fake_loss = torch.mean(torch.abs(fake_img_passed -
                                                   gen_image))
                d_loss = 0.5 * d_real_loss + 0.5 * d_wrong_loss - began_k * d_fake_loss

                # Update began k value
                balance = (model_options['began_gamma'] *
                           (0.5 * d_real_loss + 0.5 * d_wrong_loss) -
                           d_fake_loss).data[0]
                began_k = min(
                    max(began_k + model_options['began_lambda_k'] * balance,
                        0), 1)
            # No CLS option
            else:
                # Cond BEGAN Discriminator Loss
                # L_D = L(y_r) - k * L(y_f)
                # k = k + lambda_k * (gamma * L(y_r) + L(y_f))
                d_real_loss = torch.mean(
                    torch.abs(real_img_passed - Variable(real_img)))
                d_fake_loss = torch.mean(torch.abs(fake_img_passed -
                                                   gen_image))
                d_loss = d_real_loss - began_k * d_fake_loss

                # Update began k value
                balance = (model_options['began_gamma'] * d_real_loss -
                           d_fake_loss).data[0]
                began_k = min(
                    max(began_k + model_options['began_lambda_k'] * balance,
                        0), 1)

            d_loss.backward()
            d_optimizer.step()

            ########## TRAIN GENERATOR ##########
            generator.zero_grad()
            for p in discriminator.parameters():
                p.requires_grad = False

            # Generate image again if you want to
            if constants.REGEN_IMAGE:
                noise_batch = torch.randn(curr_batch_size,
                                          model_options['z_dim'])
                if torch.cuda.is_available():
                    noise_batch = noise_batch.cuda()
                noise_vec.resize_as_(noise_batch).copy_(noise_batch)
                gen_image = generator.forward(Variable(text_vec),
                                              Variable(noise_vec))

            new_fake_img_passed = discriminator.forward(
                gen_image, Variable(real_caption))

            # Generator Loss
            # L_G = L(y_f)
            g_loss = torch.mean(torch.abs(new_fake_img_passed - gen_image))
            if constants.USE_REAL_LS:
                g_loss += torch.mean(
                    torch.abs(new_fake_img_passed - Variable(real_img)))
            elif constants.USE_CLS:
                g_loss -= torch.mean(
                    torch.abs(new_fake_img_passed - Variable(wrong_img)))

            g_loss.backward()
            g_optimizer.step()

            # M = L(y_r) + |gamma * L(y_r) - L(y_f)|
            convergence_val = d_real_loss + abs(balance)

            # learning rate decay
            g_optimizer = util.adjust_learning_rate(g_optimizer,
                                                    num_iterations)
            d_optimizer = util.adjust_learning_rate(d_optimizer,
                                                    num_iterations)

            if i % constants.LOSS_SAVE_IDX == 0:
                losses['train']['generator'].append((g_loss.data[0], epoch, i))
                losses['train']['discriminator'].append(
                    (d_loss.data[0], epoch, i))
                losses['train']['converge'].append(
                    (convergence_val.data[0], epoch, i))

            num_iterations += 1

        print('Total number of iterations: ', num_iterations)
        print('Training G Loss: ', g_loss.data[0])
        print('Training D Loss: ', d_loss.data[0])
        print('Training Convergence: ', convergence_val.data[0])
        print('K value: ', began_k)
        epoch_time = time.time() - st
        print("Time: ", epoch_time)

        if epoch == constants.REPORT_EPOCH:
            with open(constants.SAVE_PATH + 'report.txt', 'w') as f:
                f.write(constants.EXP_REPORT)
                f.write("Time per epoch: " + str(epoch_time))
            print("Saved report")

        ########## DEV SET #########
        # Calculate dev set loss
        # Volatile is true because we are running in inference mode (no need to calculate gradients)
        generator.eval()
        discriminator.eval()
        for i, batch_iter in enumerate(
                util.grouper(val_captions.keys(), constants.BATCH_SIZE)):
            batch_keys = [x for x in batch_iter if x is not None]
            curr_batch_size = len(batch_keys)

            # Gather batch data
            noise_batch = torch.randn(curr_batch_size, model_options['z_dim'])
            text_vec_batch = torch.Tensor(
                util.get_text_description(val_captions, batch_keys))
            real_caption_batch = torch.Tensor(
                util.get_text_description(val_captions, batch_keys))
            real_img_batch = torch.Tensor(
                util.choose_real_image(val_image_dict, batch_keys))
            if constants.USE_CLS:
                wrong_img_batch = torch.Tensor(
                    util.choose_wrong_image(val_image_dict, batch_keys))
            if torch.cuda.is_available():
                noise_batch = noise_batch.cuda()
                text_vec_batch = text_vec_batch.cuda()
                real_caption_batch = real_caption_batch.cuda()
                real_img_batch = real_img_batch.cuda()
                if constants.USE_CLS:
                    wrong_img_batch = wrong_img_batch.cuda()

            # Fill in tensors with batch data
            noise_vec.resize_as_(noise_batch).copy_(noise_batch)
            text_vec.resize_as_(text_vec_batch).copy_(text_vec_batch)
            real_caption.resize_as_(text_vec_batch).copy_(text_vec_batch)
            real_img.resize_as_(real_img_batch).copy_(real_img_batch)
            if constants.USE_CLS:
                wrong_img.resize_as_(wrong_img_batch).copy_(wrong_img_batch)

            # Run through generator
            gen_image = generator.forward(Variable(
                text_vec, volatile=True), Variable(
                    noise_vec,
                    volatile=True))  # Returns tensor variable holding image

            # Run through discriminator
            real_img_passed = discriminator.forward(
                Variable(real_img, volatile=True),
                Variable(real_caption, volatile=True))
            fake_img_passed = discriminator.forward(
                gen_image.detach(), Variable(real_caption, volatile=True))
            if constants.USE_CLS:
                wrong_img_passed = discriminator.forward(
                    Variable(wrong_img, volatile=True),
                    Variable(real_caption, volatile=True))

            # Calculate D loss
            # D LOSS
            if constants.USE_REAL_LS:
                d_real_loss = torch.mean(
                    torch.abs(real_img_passed - Variable(real_img)))
                d_fake_loss = torch.mean(torch.abs(fake_img_passed -
                                                   gen_image))
                d_real_sensitivity_loss = torch.mean(
                    torch.abs(fake_img_passed - Variable(real_img)))
                d_loss = d_real_loss - began_k * (
                    0.5 * d_fake_loss + 0.5 * d_real_sensitivity_loss)

                balance = (model_options['began_gamma'] * d_real_loss -
                           0.5 * d_fake_loss -
                           0.5 * d_real_sensitivity_loss).data[0]
            elif constants.USE_CLS:
                d_real_loss = torch.mean(
                    torch.abs(real_img_passed - Variable(real_img)))
                d_wrong_loss = torch.mean(
                    torch.abs(fake_img_passed - Variable(wrong_img)))
                d_fake_loss = torch.mean(torch.abs(fake_img_passed -
                                                   gen_image))
                d_loss = 0.5 * d_real_loss + 0.5 * d_wrong_loss - began_k * d_fake_loss

                balance = (model_options['began_gamma'] *
                           (0.5 * d_real_loss + 0.5 * d_wrong_loss) -
                           d_fake_loss).data[0]
            # No CLS option
            else:
                d_real_loss = torch.mean(
                    torch.abs(real_img_passed - Variable(real_img)))
                d_fake_loss = torch.mean(torch.abs(fake_img_passed -
                                                   gen_image))
                d_loss = d_real_loss - began_k * d_fake_loss

                # Update began k value
                balance = (model_options['began_gamma'] * d_real_loss -
                           d_fake_loss).data[0]

            # Calculate G loss
            if constants.USE_REAL_LS:
                g_loss = 0.5 * torch.mean(
                    torch.abs(fake_img_passed - gen_image))
                g_loss += 0.5 * torch.mean(
                    torch.abs(fake_img_passed - Variable(real_img)))
            elif constants.USE_CLS:
                g_loss = torch.mean(torch.abs(fake_img_passed - gen_image))
                g_loss -= 0.5 * torch.mean(
                    torch.abs(fake_img_passed - Variable(wrong_img)))
            else:
                # L_G = L(y_f)
                g_loss = torch.mean(torch.abs(fake_img_passed - gen_image))

            # M = L(y_r) + |gamma * L(y_r) - L(y_f)|
            convergence_val = d_real_loss + abs(balance)

            if i % constants.LOSS_SAVE_IDX == 0:
                losses['val']['generator'].append((g_loss.data[0], epoch, i))
                losses['val']['discriminator'].append(
                    (d_loss.data[0], epoch, i))
                losses['val']['converge'].append(
                    (convergence_val.data[0], epoch, i))

        print('Val G Loss: ', g_loss.data[0])
        print('Val D Loss: ', d_loss.data[0])
        print('Val Convergence: ', convergence_val.data[0])

        # Save losses
        torch.save(losses, constants.SAVE_PATH + 'losses')

        # Save images
        vutils.save_image(gen_image[0].data.cpu(),
                          constants.SAVE_PATH + 'images/gen0_epoch' +
                          str(epoch) + '.png',
                          normalize=True)
        vutils.save_image(gen_image[1].data.cpu(),
                          constants.SAVE_PATH + 'images/gen1_epoch' +
                          str(epoch) + '.png',
                          normalize=True)
        vutils.save_image(fake_img_passed[0].data.cpu(),
                          constants.SAVE_PATH + 'images/gen_recon0_epoch' +
                          str(epoch) + '.png',
                          normalize=True)
        vutils.save_image(fake_img_passed[1].data.cpu(),
                          constants.SAVE_PATH + 'images/gen_recon1_epoch' +
                          str(epoch) + '.png',
                          normalize=True)
        # vutils.save_image(real_img_passed[0].data.cpu(),
        #             constants.SAVE_PATH + 'images/real_recon0_epoch' + str(epoch) + '.png',
        #             normalize=True)
        # vutils.save_image(real_img_passed[1].data.cpu(),
        #             constants.SAVE_PATH + 'images/real_recon1_epoch' + str(epoch) + '.png',
        #             normalize=True)

        # Save model
        if epoch % constants.CHECKPOINT_FREQUENCY == 0 and epoch != 0 or epoch == constants.NUM_EPOCHS - 1:
            save_checkpoint = {
                'epoch': epoch,
                'g_dict': generator.state_dict(),
                'd_dict': discriminator.state_dict(),
                'g_optimizer': g_optimizer.state_dict(),
                'd_optimizer': d_optimizer.state_dict(),
                'began_k': began_k
            }
            torch.save(save_checkpoint,
                       constants.SAVE_PATH + 'weights/epoch' + str(epoch))
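
This script targets pre-0.4 PyTorch (Variable, volatile=True, loss.data[0]). On current PyTorch the same inference pass would read roughly as follows (an equivalent sketch using the script's names):

with torch.no_grad():  # replaces the volatile=True Variables
    gen_image = generator(text_vec, noise_vec)
    fake_img_passed = discriminator(gen_image, real_caption)
g_loss_value = g_loss.item()  # replaces g_loss.data[0]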
Example #18
sys.path.insert(0, '../')  # dir of the module util


js = json.load(open("db.json"))

# bbox = util.get_bboxes(js["bbox_dir"].encode())
base = js["base"]
cont = js["last_cont"]  # save this, indica el contador de imagenes en donde quedaste
min_images = js["min_images"]
max_images = js["max_images"]

print "loading..."
cropped_js = json.load(open(js["cropped_js"].encode()))
images_path = util.img_list(js["db_dir"].encode())
images = util.get_images(images_path[min_images:max_images])
# bbox_portion = bbox[min_images:max_images]
print "done!"

cv2.namedWindow("cropper")
# cv2.namedWindow("original")

# bbox_im = bbox_portion[cont]
actual_im = images[cont]
# clone_im = images[cont].copy()
# cv2.rectangle(clone_im, bbox_im.pt1, bbox_im.pt2, (0, 255, 0), 1)

cv2.setMouseCallback("cropper", util.click_and_crop)
cv2.imshow("cropper", actual_im)
# cv2.imshow("original", clone_im)
Example #19
    def start(self, context, return_queue):
        # Init script variables
        start = time()
        end = time()
        website_driver = None
        email_driver = None
        logger = self.logger
        disable_logging = self.disable_logging
        exception_raised = True
        exception_type = ""
        proxy_address = ""
        if BakecaSlave.use_proxy:
            proxy_address = BakecaSlave.proxy.get_address()
            if proxy_address is None:
                raise ProxyException("No more proxies available!")
        if BakecaSlave.use_lpm:
            proxy_address = BakecaSlave.lpm_address

        # Try and read last state from file
        self.read_last_state()
        # Get city and category and increment as needed
        city_id, category_id = self.get_additional_data()
        # Get image file and text file
        self.parse_context(context)
        logger.info("Parsed context %s." % str(context))

        try:
            # Get text from file
            # Use up to 20 additional text_files with the same bot.
            # BOT_TEXT_IMAGES/BAKECA/BAKECA_TEXT_FILE.txt
            # BOT_TEXT_IMAGES/BAKECA/BAKECA_TEXT_FILE1.txt ... _FILE20.txt
            text_file_list = [BakecaSlave.text_file]
            for i in range(1, 21):
                (basename, ext) = os.path.splitext(BakecaSlave.text_file)
                i_text_file = basename + str(i) + ext
                if os.path.exists(i_text_file):
                    text_file_list.append(i_text_file)

            text_file_id = self.city_index % len(text_file_list)
            text_file_x = text_file_list[text_file_id]

            logger.info("Getting title and content from: %s" % text_file_x)

            age, title, content = util.parse_text_file(text_file_x)
            logger.info("Got title and content.")

            # First go and get mail
            email_driver = util.get_chrome_driver(BakecaSlave.is_headless,
                                                  proxy_address)

            # util.go_to_page(driver=email_driver, page_url=util.MOAKT_URL)
            # email = util.moakt_get_email_address(email_driver)
            # email = util.smail_get_email_address(email_driver)
            email = self.smailpro_man.get_email_address(self.slave_index)
            password = util.random_string(10)
            # Get images
            logger.info("Got email [%s] and password [%s]" % (email, password))
            images, out_message = util.get_images(BakecaSlave.image_dir)
            logger.info(out_message)

            # Go to Site
            logger.info("Opening website page...")
            website_driver = util.get_chrome_driver(BakecaSlave.is_headless,
                                                    proxy_address)
            util.go_to_page(driver=website_driver,
                            page_url=CONSTANTS.WEBSITE_URL)

            # Post without register
            logger.info("Make website post...")
            is_telg_auth, is_chiudi, loaded_images = self.make_website_post(
                website_driver, city_id, category_id, age, title, content,
                images, email)

            # Close website driver
            website_driver.quit()

            # If not TELEGRAM Auth continue with post flow
            if not is_telg_auth:
                # Sleep for mail to arrive
                sleep(5)
                # Go to mail box
                logger.info("Verify email...")
                # util.moakt_access_verify_link(email_driver, '/html/body/p[5]/a')
                # util.smail_validate_link(email_driver)
                html_file = self.smailpro_man.get_message_as_temp_file()
                email_driver.get("file://" + html_file)
                sleep(2)
                util.smailpro_validate_link(email_driver)
                os.unlink(html_file)

                # Click on accept
                util.scroll_into_view_click_xpath(email_driver,
                                                  '//*[@id="accetto"]')

                # Get post link
                logger.info("Getting post url...")
                announce_link = email_driver.find_element_by_xpath(
                    '//*[@id="colonna-unica"]/div[1]/p[1]/a')
                post_url = announce_link.get_attribute('href')

                # Close email driver
                email_driver.quit()

                print(post_url)
                end = time()
            exception_raised = False

        except TimeoutException as e:
            exception_type = "Timeout on page wait."
            logger.exception("Timeout on page wait.")
            raise BakecaException("Timeout on page wait.")
        except NoSuchElementException as e:
            exception_type = "Element not found."
            logger.exception("Element not found.")
            raise BakecaException("Element not found.")
        except ElementNotInteractableException as e:
            exception_type = "Element not interactable."
            logger.exception("Element not interactable.")
            raise BakecaException("Element not interactable.")
        except util.UtilParseError as e:
            exception_type = "Parse error."
            logger.exception("Parse error.")
            raise BakecaException("Parse error.")
        except util.CaptchaSolverException as e:
            exception_type = "Failed to solve captcha in time."
            logger.exception("Failed to solve captcha in time.")
            raise BakecaException("Failed to solve captcha in time.")
        except TelegramAuthException as e:
            exception_type = "TelegramAuth was required."
            logger.exception("TelegramAuth was required.")
            raise BakecaException("TelegramAuth was required.")
        except SMailProException as e:
            exception_type = "SMailPro exception occurred."
            logger.exception("SMailPro exception occurred.")
            raise BakecaException("SMailPro exception occurred.")
        except BakecaException as e:
            exception_type = "Bakeca exception occurred"
            logger.exception("Bakeca exception occurred")
            raise e
        except Exception as e:
            exception_type = "Unknown error."
            logger.exception("Unknown error.")
            raise BakecaException("Unknown error.")
        finally:
            # Close driver
            if email_driver is not None:
                email_driver.quit()
            if website_driver is not None:
                website_driver.quit()
            if BakecaSlave.use_proxy:
                BakecaSlave.proxy.set_valid(False)
                BakecaSlave.proxy.__exit__(None, None, None)
            self.write_last_state()
            if exception_raised:
                end = time()
                logger.info(
                    "Exception was raised. Writing error to credentials.")
                util.save_credentials_error(BAKECA_CREDENTIALS_PATH,
                                            exception_type, "bakeca.com",
                                            CONSTANTS.CITIES[city_id],
                                            CONSTANTS.CATEGORIES[category_id],
                                            end - start,
                                            BakecaSlave.bakeca_lock)
                announce_msg = (
                    "BAKECA !!!FAILED!!! For City %s and category %s." %
                    (CONSTANTS.CITIES[city_id],
                     CONSTANTS.CATEGORIES[category_id]))
                logger.info(announce_msg)
                print(announce_msg)
                self.push_to_fail_queue(city_id, category_id)
                bot_logger.close_logger(logger, disable_logging)

                # if failed to solve captcha simply retry
                if exception_type == "Failed to solve captcha in time.":
                    return_queue.put(BAKECA_RETRY)
                    return BAKECA_RETRY
                else:
                    return_queue.put(BAKECA_ERROR)
                    return BAKECA_ERROR

        # Success - save credentials and post url
        website = "bakeca.com" + "\n" + "City: " + CONSTANTS.CITIES[city_id] + "\n" + "Category: " + \
                  CONSTANTS.CATEGORIES[category_id] + "\n" + "Is chiudi: " + str(
            is_chiudi) + "\n" + "Images loaded: " + str(loaded_images)

        if is_telg_auth:
            util.save_credentials(BAKECA_CREDENTIALS_PATH, email, password,
                                  "FAILED - TELEGRAM AUTH REQUIRED", website,
                                  end - start, BakecaSlave.bakeca_lock)
            # The telegram banner blocked the posting. Leave it and switch the city_id.
            announce_msg = (
                "BAKECA !!!FAILED-TELEGRAM!!! For City %s and category %s." %
                (CONSTANTS.CITIES[city_id], CONSTANTS.CATEGORIES[category_id]))
            print(announce_msg)
            logger.info(announce_msg)
        else:
            util.save_credentials(BAKECA_CREDENTIALS_PATH, email, password,
                                  post_url, website, end - start,
                                  BakecaSlave.bakeca_lock)
            # Post succeeded.
            announce_msg = (
                "BAKECA Success For City %s and category %s." %
                (CONSTANTS.CITIES[city_id], CONSTANTS.CATEGORIES[category_id]))
            print(announce_msg)
            logger.info(announce_msg)

        bot_logger.close_logger(logger, disable_logging)
        return_queue.put(BAKECA_SUCCESS)

        if BakecaSlave.use_proxy:
            BakecaSlave.proxy.set_valid(True)
            BakecaSlave.proxy.__exit__(None, None, None)

        return BAKECA_SUCCESS
Example #20
np.random.seed(seed=SEED)
INPUT_SIZE = 299
n_pre_epochs = 10
n_epochs = 100
batch_size = 32
n_images = 100

# Load labels
print('Load labels...')
labels = get_labels()[:n_images]

# Load training data
print('Load training data...')
x_train = np.zeros((n_images, INPUT_SIZE, INPUT_SIZE, 3), dtype=K.floatx())
for i, (img, img_id) in tqdm(
        enumerate(get_images('train', INPUT_SIZE, amount=n_images))):
    x = inception_v3.preprocess_input(np.expand_dims(img, axis=0))
    x_train[i] = x
y_train = one_hot(labels['breed'].values, num_classes=NUM_CLASSES)

# Arguments of ImageDataGenerator define the types of augmentation to perform,
# e.g. horizontal flip, rotation, etc.
# No fitting required since we don't use centering/normalization/whitening.
datagen = ImageDataGenerator(rotation_range=20,
                             width_shift_range=.2,
                             height_shift_range=.2,
                             horizontal_flip=True,
                             validation_split=.1)

# Define model:
#   Add a single fully connected layer on top of the conv layers of Inception
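
The snippet cuts off at the model definition; the layer stack the comment describes would look roughly like this (hyperparameters assumed):

from tensorflow.keras.applications import InceptionV3
from tensorflow.keras import layers, models

base = InceptionV3(weights="imagenet", include_top=False, pooling="avg",
                   input_shape=(INPUT_SIZE, INPUT_SIZE, 3))
model = models.Sequential([base,
                           layers.Dense(NUM_CLASSES, activation="softmax")])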