Example #1
def __init__(self, directory, offset=None, min_index=None, max_index=None):
    self.frames = []
    files = sorted(real_glob("{}/*.{{jpg,png}}".format(directory)))
    num_files = len(files)
    print("There are {} files in {}".format(num_files, directory))
    if num_files == 0:
        self.is_valid = False
        return
    if min_index is None:
        min_index = 0
    if max_index is None:
        max_index = num_files
    for i in range(min_index, max_index):
        f = files[i]
        img = imread(f, mode='RGB')
        self.frames.append(image_to_texture(img))
    self.num_keyframes = len(self.frames)
    if offset is None:
        self.cur_keyframe = random.randint(0, self.num_keyframes - 1)
    else:
        self.cur_keyframe = offset
    json_file = real_glob("{}/*.json".format(directory))
    if len(json_file) != 1:
        print("PROBLEM READING JSON VECTOR FOR {}".format(directory))
    else:
        self.latent = get_json_vectors(json_file[0])[0]
    self.is_valid = True  # assumed: mark success here, since is_valid was otherwise only set on failure
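Note: Python's standard glob does not expand brace patterns such as *.{jpg,png}, so real_glob above is presumably a project helper that does. A minimal sketch of such a helper (the name real_glob_sketch and the single-group handling are assumptions, not plat's actual implementation):

import glob
import itertools
import re

def real_glob_sketch(pattern):
    # expand a single {a,b,...} group into alternatives, then glob each one
    match = re.search(r"{([^}]*)}", pattern)
    if match is None:
        return sorted(glob.glob(pattern))
    alternatives = match.group(1).split(",")
    expanded = [pattern[:match.start()] + alt + pattern[match.end():]
                for alt in alternatives]
    return sorted(itertools.chain.from_iterable(glob.glob(p) for p in expanded))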
Example #2
def check_lazy_initialize(args, dmodel, smile_offsets):
    # debug: don't load anything...
    # return dmodel, smile_offsets

    # first get model ready
    if dmodel is None and (args.model is not None
                           or args.model_file is not None):
        print('Finding saved model...')
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type)

    # get attributes
    if smile_offsets is None and args.anchor_offset is not None:
        offsets = get_json_vectors(args.anchor_offset)
        dim = len(offsets[0])
        offset_indexes = args.anchor_indexes.split(",")
        offset_vector = offset_from_string(offset_indexes[0], offsets, dim)
        for n in range(1, len(offset_indexes)):
            offset_vector += offset_from_string(offset_indexes[n], offsets,
                                                dim)
        smile_offsets = [offset_vector]

    return dmodel, smile_offsets
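check_lazy_initialize is a cache-or-create helper: it loads the model and offset vectors only the first time they are needed and returns them unchanged afterwards. A hedged usage sketch (incoming_requests is a hypothetical request loop; args is the parsed argparse namespace):

dmodel, smile_offsets = None, None
for request in incoming_requests:  # hypothetical request source
    dmodel, smile_offsets = check_lazy_initialize(args, dmodel, smile_offsets)
    # ... handle the request using dmodel and smile_offsets ...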
Example #3
def run_with_args(args, dmodel, cur_anchor_image, cur_save_path, cur_z_step):
    if args.seed is not None:
        np.random.seed(args.seed)
        random.seed(args.seed)

    anchor_images = None
    if args.anchors:
        _, get_anchor_images = lazy_init_fuel_dependencies()
        allowed = None
        prohibited = None
        include_targets = False
        if args.allowed:
            include_targets = True
            # list() so the result can be indexed and reused under Python 3
            allowed = list(map(int, args.allowed.split(",")))
        if args.prohibited:
            include_targets = True
            prohibited = list(map(int, args.prohibited.split(",")))
        anchor_images = get_anchor_images(args.dataset, args.split, args.offset,
                                          args.stepsize, args.numanchors,
                                          allowed, prohibited, args.image_size,
                                          args.color_convert,
                                          include_targets=include_targets)

    if cur_anchor_image is not None:
        _, _, anchor_images = anchors_from_image(cur_anchor_image, image_size=(args.image_size, args.image_size))
        if args.offset > 0:
            anchor_images = anchor_images[args.offset:]
        # untested
        if args.numanchors is not None:
            anchor_images = anchor_images[:args.numanchors]

    if args.passthrough:
        print('Preparing image grid...')
        img = grid2img(anchor_images, args.rows, args.cols, not args.tight)
        img.save(cur_save_path)
        sys.exit(0)

    if dmodel is None:
        model_class_parts = args.model_class.split(".")
        model_class_name = model_class_parts[-1]
        model_module_name = ".".join(model_class_parts[:-1])
        print("Loading {} interface from {}".format(model_class_name, model_module_name))        
        ModelClass = getattr(importlib.import_module(model_module_name), model_class_name)
        print("Loading model from {}".format(args.model))
        dmodel = ModelClass(filename=args.model)

    if anchor_images is not None:
        x_queue = anchor_images[:]
        anchors = None
        # print("========> ENCODING {} at a time".format(args.batch_size))
        while len(x_queue) > 0:
            cur_x = x_queue[:args.batch_size]
            x_queue = x_queue[args.batch_size:]
            encoded = dmodel.encode_images(cur_x)
            if anchors is None:
                anchors = encoded
            else:
                anchors = np.concatenate((anchors, encoded), axis=0)

        # anchors = dmodel.encode_images(anchor_images)
    elif args.anchor_vectors is not None:
        anchors = get_json_vectors(args.anchor_vectors)
    else:
        anchors = None

    if args.invert_anchors:
        anchors = -1 * anchors

    if args.encoder:
        if anchors is not None:
            output_vectors(anchors)
        else:
            stream_output_vectors(dmodel, args.dataset, args.split, batch_size=args.batch_size)
        sys.exit(0)

    global_offset = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        offsets = get_json_vectors(args.anchor_offset)
        if args.anchor_noise:
            anchors = anchors_noise_offsets(anchors, offsets, args.rows, args.cols, args.spacing,
                cur_z_step, args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale, args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)
        else:
            anchors = anchors_from_offsets(anchors[0], offsets, args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale, args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)

    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        if args.global_ramp:
            offsets = cur_z_step * offsets
        global_offset =  get_global_offset(offsets, args.global_indices, args.global_scale)

    z_dim = dmodel.get_zdim()
    # I don't remember what partway/encircle do so they are not handling the chain layout
    # this handles the case (at least) of mines with random anchors
    if (args.partway is not None) or args.encircle or (args.mine and anchors is None):
        srows = (args.rows // args.spacing) + 1
        scols = (args.cols // args.spacing) + 1
        rand_anchors = generate_latent_grid(z_dim, rows=srows, cols=scols, fan=False, gradient=False,
            spherical=False, gaussian=False, anchors=None, anchor_images=None, mine=False, chain=False,
            spacing=args.spacing, analogy=False, rand_uniform=args.uniform)
        if args.partway is not None:
            num_rand = len(rand_anchors)
            clipped_anchors = anchors[:num_rand]
            anchors = (1.0 - args.partway) * rand_anchors + args.partway * clipped_anchors
        elif args.encircle:
            anchors = surround_anchors(srows, scols, anchors, rand_anchors)
        else:
            anchors = rand_anchors
    z = generate_latent_grid(z_dim, args.rows, args.cols, args.fan, args.gradient, not args.linear, args.gaussian,
            anchors, anchor_images, args.mine, args.chain, args.spacing, args.analogy)
    if global_offset is not None:
        z = z + global_offset

    grid_from_latents(z, dmodel, args.rows, args.cols, anchor_images, args.tight, args.shoulders, cur_save_path, args.batch_size)
    return dmodel
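The x_queue loop above encodes anchors in chunks of args.batch_size and concatenates the results. A compact equivalent, assuming encode_fn behaves like dmodel.encode_images:

import numpy as np

def encode_in_batches(encode_fn, images, batch_size):
    # encode fixed-size chunks, then stack them into one array
    chunks = [images[i:i + batch_size] for i in range(0, len(images), batch_size)]
    return np.concatenate([encode_fn(chunk) for chunk in chunks], axis=0)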
Example #4
def atvec(parser, context, args):
    parser.add_argument('--dataset',
                        dest='dataset',
                        default=None,
                        help="Source dataset (for labels).")
    parser.add_argument('--labels',
                        dest='labels',
                        default=None,
                        help="Text file with 0/1 labels.")
    parser.add_argument(
        '--split',
        dest='split',
        default="train",
        help=
        "Which split to use from the dataset (train/nontrain/valid/test/any).")
    parser.add_argument("--num-attribs",
                        dest='num_attribs',
                        type=int,
                        default=40,
                        help="Number of attributes (labes)")
    parser.add_argument("--z-dim",
                        dest='z_dim',
                        type=int,
                        default=100,
                        help="z dimension of vectors")
    parser.add_argument("--encoded-vectors",
                        type=str,
                        default=None,
                        help="Comma separated list of json arrays")
    parser.add_argument(
        '--thresh',
        dest='thresh',
        default=False,
        action='store_true',
        help="Compute thresholds for attribute vectors classifiers")
    parser.add_argument('--roc',
                        dest='roc',
                        default=False,
                        action='store_true',
                        help="ROC curve of selected attribute vectors")
    parser.add_argument("--attribute-vectors",
                        dest='attribute_vectors',
                        default=None,
                        help="use json file as source of attribute vectors")
    parser.add_argument(
        "--attribute-thresholds",
        dest='attribute_thresholds',
        default=None,
        help="use these non-zero values for binary classifier thresholds")
    parser.add_argument('--attribute-indices',
                        dest='attribute_indices',
                        default=None,
                        type=str,
                        help="indices to select specific attribute vectors")
    parser.add_argument(
        "--balanced2",
        dest='balanced2',
        type=str,
        default=None,
        help="Balanced two attributes and generate atvec. eg: 20,31")
    parser.add_argument(
        "--balanced",
        dest='balanced',
        type=str,
        default=None,
        help="Balance attributes and generate atvec. eg: 20,21,31")
    parser.add_argument("--avg-diff",
                        dest='avg_diff',
                        type=str,
                        default=None,
                        help="Two lists of vectors to average and then diff")
    parser.add_argument('--outfile',
                        dest='outfile',
                        default=None,
                        help="Output json file for vectors.")
    args = parser.parse_args(args)

    if args.avg_diff:
        vecs1, vecs2 = args.avg_diff.split(",")
        encoded1 = json_list_to_array(vecs1)
        encoded2 = json_list_to_array(vecs2)
        print("Taking the difference between {} and {} vectors".format(
            len(encoded1), len(encoded2)))
        m1 = np.mean(encoded1, axis=0)
        m2 = np.mean(encoded2, axis=0)
        atvec = m2 - m1
        z_dim, = atvec.shape
        atvecs = atvec.reshape(1, z_dim)
        print("Computed diff shape: {}".format(atvecs.shape))
        if args.outfile is not None:
            save_json_attribs(atvecs, args.outfile)
        sys.exit(0)

    encoded = json_list_to_array(args.encoded_vectors)
    num_rows, z_dim = encoded.shape
    if args.dataset:
        attribs = np.array(
            list(
                get_dataset_iterator(args.dataset,
                                     args.split,
                                     include_features=False,
                                     include_targets=True)))
    else:
        attribs = get_attribs_from_file(args.labels)
    print("encoded vectors: {}, attributes: {} ".format(
        encoded.shape, attribs.shape))

    if args.roc:
        atvecs = get_json_vectors(args.attribute_vectors)
        dim = len(atvecs[0])
        chosen_vector = offset_from_string(args.attribute_indices, atvecs, dim)
        if args.attribute_thresholds is not None:
            atvec_thresholds = get_json_vectors(args.attribute_thresholds)
            threshold = atvec_thresholds[0][int(args.attribute_indices)]
        else:
            threshold = None
        do_roc(chosen_vector, encoded, attribs, int(args.attribute_indices),
               threshold, args.outfile)
        sys.exit(0)

    if args.thresh:
        atvecs = get_json_vectors(args.attribute_vectors)
        do_thresh(atvecs, encoded, attribs, args.outfile)
        sys.exit(0)

    if args.balanced2:
        # list() so the indices can be indexed and counted under Python 3
        indexes = list(map(int, args.balanced2.split(",")))
        with_attr, without_attr = get_balanced_averages2(
            attribs, encoded, indexes[0], indexes[1])
        num_attribs = 2
    elif args.balanced:
        indexes = list(map(int, args.balanced.split(",")))
        with_attr, without_attr = get_balanced_averages(
            attribs, encoded, indexes)
        num_attribs = len(indexes)
    else:
        with_attr, without_attr = get_averages(attribs, encoded,
                                               args.num_attribs)
        num_attribs = args.num_attribs

    atvects = averages_to_attribute_vectors(with_attr, without_attr,
                                            num_attribs, z_dim)
    print("Computed atvecs shape: {}".format(atvects.shape))

    if args.outfile is not None:
        save_json_attribs(atvects, args.outfile)
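The --avg-diff branch above is the classic attribute-vector recipe: the mean of the encodings that have the attribute minus the mean of those that do not. A self-contained toy sketch (the data is fabricated for illustration):

import numpy as np

encoded_with = np.random.randn(100, 8) + 0.5   # toy latents with the attribute
encoded_without = np.random.randn(100, 8)      # toy latents without it
atvec = (np.mean(encoded_with, axis=0) -
         np.mean(encoded_without, axis=0)).reshape(1, -1)
print(atvec.shape)  # (1, 8)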
Example #5
def atvec(parser, context, args):
    parser.add_argument('--dataset',
                        dest='dataset',
                        default=None,
                        help="Source dataset (for labels).")
    # memo: --labels became --attributes when --classes was added
    parser.add_argument('--attributes',
                        dest='attributes',
                        default=None,
                        help="Text file with 0/1 labels.")
    parser.add_argument('--classes',
                        dest='classes',
                        default=None,
                        help="Text file with 0/1/2/.../num-classes-1 labels.")
    parser.add_argument(
        '--split',
        dest='split',
        default="train",
        help=
        "Which split to use from the dataset (train/nontrain/valid/test/any).")
    parser.add_argument("--num-attribs",
                        dest='num_attribs',
                        type=int,
                        default=40,
                        help="Number of attributes (labes)")
    parser.add_argument(
        "--which-attribs",
        type=str,
        default=None,
        help="optional comma separated list of attributes to run")
    parser.add_argument(
        "--num-classes",
        dest='num_classes',
        type=int,
        default=None,
        help="For multiclass, number of classes (assumed 0 .. n-1)")
    parser.add_argument("--z-dim",
                        dest='z_dim',
                        type=int,
                        default=100,
                        help="z dimension of vectors")
    parser.add_argument("--encoded-vectors",
                        type=str,
                        default=None,
                        help="Comma separated list of json arrays")
    parser.add_argument("--encoded-true",
                        type=str,
                        default=None,
                        help="Comma separated list of json arrays (true)")
    parser.add_argument("--encoded-false",
                        type=str,
                        default=None,
                        help="Comma separated list of json arrays (false)")
    parser.add_argument(
        '--thresh',
        dest='thresh',
        default=False,
        action='store_true',
        help="Compute thresholds for attribute vectors classifiers")
    parser.add_argument('--svm',
                        dest='svm',
                        default=False,
                        action='store_true',
                        help="Use SVM for computing attribute vectors")
    parser.add_argument("--limit",
                        dest='limit',
                        type=int,
                        default=None,
                        help="Limit number of inputs when computing atvecs")
    parser.add_argument('--roc',
                        dest='roc',
                        default=False,
                        action='store_true',
                        help="ROC curve of selected attribute vectors")
    parser.add_argument("--attribute-vectors",
                        dest='attribute_vectors',
                        default=None,
                        help="use json file as source of attribute vectors")
    parser.add_argument(
        "--attribute-thresholds",
        dest='attribute_thresholds',
        default=None,
        help="use these non-zero values for binary classifier thresholds")
    parser.add_argument("--attribute-set",
                        dest='attribute_set',
                        default="all",
                        help="score ROC/accuracy against true/false/all")
    parser.add_argument('--attribute-indices',
                        dest='attribute_indices',
                        default=None,
                        type=str,
                        help="indices to select specific attribute vectors")
    parser.add_argument(
        "--balanced2",
        dest='balanced2',
        type=str,
        default=None,
        help="Balanced two attributes and generate atvec. eg: 20,31")
    parser.add_argument(
        "--balanced",
        dest='balanced',
        type=str,
        default=None,
        help="Balance attributes and generate atvec. eg: 20,21,31")
    parser.add_argument("--avg-diff",
                        dest='avg_diff',
                        type=str,
                        default=None,
                        help="Two lists of vectors to average and then diff")
    parser.add_argument(
        "--svm-diff",
        dest='svm_diff',
        type=str,
        default=None,
        help="Two lists of vectors to average and then svm diff")
    parser.add_argument('--outfile',
                        dest='outfile',
                        default=None,
                        help="Output json file for vectors.")
    args = parser.parse_args(args)

    if args.avg_diff:
        vecs1, vecs2 = args.avg_diff.split(",")
        encoded1 = json_list_to_array(vecs1)
        encoded2 = json_list_to_array(vecs2)
        print("Taking the difference between {} and {} vectors".format(
            len(encoded1), len(encoded2)))
        m1 = np.mean(encoded1, axis=0)
        m2 = np.mean(encoded2, axis=0)
        atvec = m2 - m1
        z_dim, = atvec.shape
        atvecs = atvec.reshape(1, z_dim)
        print("Computed diff shape: {}".format(atvecs.shape))
        if args.outfile is not None:
            save_json_attribs(atvecs, args.outfile)
        sys.exit(0)

    if args.svm_diff:
        vecs1, vecs2 = args.svm_diff.split(",")
        encoded1 = json_list_to_array(vecs1)
        encoded2 = json_list_to_array(vecs2)
        print("Taking the svm difference between {} and {} vectors".format(
            len(encoded1), len(encoded2)))
        C = 1.0  # SVM regularization parameter
        # build the training set: encoded1 -> False, encoded2 -> True
        X = np.concatenate((encoded1, encoded2), axis=0)
        y = np.array([False] * len(encoded1) + [True] * len(encoded2))
        # svc = svm.LinearSVC(C=C, class_weight="balanced").fit(X, y)
        svc = svm.LinearSVC(C=C).fit(X, y)
        # get the separating hyperplane
        w = svc.coef_[0]

        #FIXME: this is a scaling hack.
        m1 = np.mean(encoded1, axis=0)
        m2 = np.mean(encoded2, axis=0)
        mean_vector = m1 - m2
        mean_length = np.linalg.norm(mean_vector)
        svm_length = np.linalg.norm(w)

        atvec = (mean_length / svm_length) * w
        z_dim, = atvec.shape
        atvecs = atvec.reshape(1, z_dim)
        print("Computed svm diff shape: {}".format(atvecs.shape))
        if args.outfile is not None:
            save_json_attribs(atvecs, args.outfile)
        sys.exit(0)

    print("reading encoded vectors...")
    attribs = None
    if args.encoded_vectors is not None:
        if args.encoded_vectors.endswith("json"):
            encoded = json_list_to_array(args.encoded_vectors)
            print("Read json array: {}".format(encoded.shape))
        else:
            encoded = np.load(args.encoded_vectors)['arr_0']
            print("Read numpy array: {}".format(encoded.shape))
    else:
        if args.encoded_true.endswith("json"):
            encoded_true = json_list_to_array(args.encoded_true)
            print("Read true json array: {}".format(encoded_true.shape))
        else:
            encoded_true = np.load(args.encoded_true)['arr_0']
            print("Read true numpy array: {}".format(encoded_true.shape))
        if args.encoded_false.endswith("json"):
            encoded_false = json_list_to_array(args.encoded_false)
            print("Read false json array: {}".format(encoded_false.shape))
        else:
            encoded_false = np.load(args.encoded_false)['arr_0']
            print("Read false numpy array: {}".format(encoded_false.shape))
        encoded = np.concatenate((encoded_true, encoded_false), axis=0)
        num_true = len(encoded_true)
        num_false = len(encoded_false)
        true_values = np.ones(shape=[num_true, 1, 1], dtype=int)
        false_values = np.zeros(shape=[num_false, 1, 1], dtype=int)
        attribs = np.concatenate((true_values, false_values), axis=0)

    if args.limit is not None:
        encoded = encoded[:args.limit]
    num_rows, z_dim = encoded.shape
    if attribs is None:
        print("reading attributes...")
        if args.dataset:
            attribs = np.array(
                list(
                    get_dataset_iterator(args.dataset,
                                         args.split,
                                         include_features=False,
                                         include_targets=True)))
            print("Read attributes from dataset: {}".format(attribs.shape))
        elif args.attributes is not None:
            print("Read attributes from file: {}".format(args.attributes))
            attribs = get_attribs_from_files(args.attributes)
        elif args.classes is not None:
            print("Read attributes from file: {}".format(args.classes))
            attribs = get_attribs_from_class_file(args.classes,
                                                  args.num_classes)
        else:
            print(
                "Don't know how to get labels: try --attributes or --classes")
            sys.exit(1)

    if args.which_attribs is not None:
        attribs = filter_attributes(attribs, args.which_attribs)
    print("encoded vectors: {}, attributes: {} ".format(
        encoded.shape, attribs.shape))

    if args.roc:
        atvecs = get_json_vectors(args.attribute_vectors)
        dim = len(atvecs[0])
        chosen_vector = offset_from_string(args.attribute_indices, atvecs, dim)
        if args.attribute_thresholds is not None:
            atvec_thresholds = get_json_vectors(args.attribute_thresholds)
            threshold = atvec_thresholds[0][int(args.attribute_indices)]
        else:
            threshold = None
        do_roc(chosen_vector,
               encoded,
               attribs,
               int(args.attribute_indices),
               threshold,
               args.attribute_set,
               args.outfile,
               isclass=False)
        # do_roc(chosen_vector, encoded, attribs, int(args.attribute_indices), threshold, args.attribute_set, args.outfile, isclass=(args.num_classes is not None))
        sys.exit(0)

    if args.thresh:
        atvecs = get_json_vectors(args.attribute_vectors)
        do_thresh(atvecs,
                  encoded,
                  attribs,
                  args.outfile,
                  isclass=(args.num_classes is not None))
        sys.exit(0)

    if args.balanced2:
        # list() so the indices can be indexed and counted under Python 3
        indexes = list(map(int, args.balanced2.split(",")))
        with_attr, without_attr = get_balanced_averages2(
            attribs, encoded, indexes[0], indexes[1])
        num_attribs = 2
    elif args.balanced:
        indexes = list(map(int, args.balanced.split(",")))
        with_attr, without_attr = get_balanced_averages(
            attribs, encoded, indexes)
        num_attribs = len(indexes)
    # I can't remember why
    # elif args.num_classes is not None:
    #     with_attr, without_attr = get_class_averages(attribs, encoded, args.num_classes);
    #     num_attribs = args.num_classes
    elif args.num_attribs is not None:
        with_attr, without_attr = get_averages(attribs, encoded)
        num_attribs = args.num_attribs
    else:
        print("Need either --num-attribs or --num-classes to compute attribute vectors")
        sys.exit(1)

    if args.svm:
        atvects = averages_to_svm_attribute_vectors(with_attr, without_attr)
    else:
        atvects = averages_to_attribute_vectors(with_attr, without_attr)
    print("Computed atvecs shape: {}".format(atvects.shape))

    if args.outfile is not None:
        save_json_attribs(atvects, args.outfile)
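The --svm-diff branch takes its direction from a linear SVM's separating hyperplane instead of from the difference of means, then rescales it (the "scaling hack" flagged in the code) so its norm matches that of the mean-difference vector. The rescaling in isolation, with toy numbers:

import numpy as np

w = np.array([0.6, 0.8])             # toy SVM normal, norm 1.0
mean_vector = np.array([3.0, 4.0])   # toy mean difference, norm 5.0
atvec = (np.linalg.norm(mean_vector) / np.linalg.norm(w)) * w
print(np.linalg.norm(atvec))         # 5.0: SVM direction, mean-difference length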
Example #6
def run_with_args(args,
                  dmodel,
                  cur_anchor_image,
                  cur_save_path,
                  cur_z_step,
                  cur_basename="basename",
                  range_data=None,
                  template_dict=None):
    if template_dict is None:
        template_dict = {}  # avoid sharing one mutable default dict across calls
    anchor_images = None
    anchor_labels = None
    if args.anchors:
        allowed = None
        prohibited = None
        include_targets = False
        if args.allowed:
            include_targets = True
            # list() so the result can be indexed and reused under Python 3
            allowed = list(map(int, args.allowed.split(",")))
        if args.prohibited:
            include_targets = True
            prohibited = list(map(int, args.prohibited.split(",")))
        anchor_images = get_anchor_images(args.dataset,
                                          args.split,
                                          args.offset,
                                          args.stepsize,
                                          args.numanchors,
                                          allowed,
                                          prohibited,
                                          args.image_size,
                                          args.color_convert,
                                          include_targets=include_targets)
        if args.with_labels:
            anchor_labels = get_anchor_labels(args.dataset, args.split,
                                              args.offset, args.stepsize,
                                              args.numanchors)

    if args.anchor_glob is not None:
        files = plat.sampling.real_glob(args.anchor_glob)
        if args.offset > 0:
            files = files[args.offset:]
        if args.stepsize > 1:
            files = files[::args.stepsize]
        if args.numanchors is not None:
            files = files[:args.numanchors]
        anchor_images = anchors_from_filelist(files)
        print("Read {} images from {} files".format(len(anchor_images),
                                                    len(files)))
        if len(anchor_images) == 0:
            print("No images, cannot contine")
            sys.exit(0)

    if cur_anchor_image is not None:
        _, _, anchor_images = anchors_from_image(cur_anchor_image,
                                                 image_size=(args.image_size,
                                                             args.image_size))
        if args.offset > 0:
            anchor_images = anchor_images[args.offset:]
        if args.stepsize > 0:
            anchor_images = anchor_images[::args.stepsize]
        if args.numanchors is not None:
            anchor_images = anchor_images[:args.numanchors]

    # at this point we can make a dummy anchor_labels if we need
    if anchor_images is not None and anchor_labels is None:
        anchor_labels = [None] * len(anchor_images)

    if args.passthrough:
        # determine final filename string
        image_size = anchor_images[0].shape[1]
        save_path = plat.sampling.emit_filename(cur_save_path, {}, args)
        print("Preparing image file {}".format(save_path))
        img = grid2img(anchor_images, args.rows, args.cols, not args.tight)
        img.save(save_path)
        sys.exit(0)

    if dmodel is None:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)

    embedded = None
    if anchor_images is not None:
        x_queue = anchor_images[:]
        c_queue = anchor_labels[:]
        anchors = None
        # print("========> ENCODING {} at a time".format(args.batch_size))
        while len(x_queue) > 0:
            cur_x = x_queue[:args.batch_size]
            cur_c = c_queue[:args.batch_size]
            x_queue = x_queue[args.batch_size:]
            c_queue = c_queue[args.batch_size:]
            encoded = dmodel.encode_images(cur_x, cur_c)
            try:
                emb_l = dmodel.embed_labels(cur_c)
            except AttributeError:
                emb_l = [None] * len(cur_x)  # match the actual (possibly short) final batch
            if anchors is None:
                anchors = encoded
                embedded = emb_l
            else:
                anchors = np.concatenate((anchors, encoded), axis=0)
                embedded = np.concatenate((embedded, emb_l), axis=0)

        # anchors = dmodel.encode_images(anchor_images)
    elif args.anchor_vectors is not None:
        anchors = get_json_vectors(args.anchor_vectors)
    else:
        anchors = None

    if args.invert_anchors:
        anchors = -1 * anchors

    if args.encoder:
        if anchors is not None:
            plat.sampling.output_vectors(anchors, args.save_path)
        else:
            plat.sampling.stream_output_vectors(dmodel,
                                                args.dataset,
                                                args.split,
                                                args.save_path,
                                                batch_size=args.batch_size)
        sys.exit(0)

    global_offset = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        offsets = get_json_vectors(args.anchor_offset)
        if args.anchor_wave:
            anchors = plat.sampling.anchors_wave_offsets(
                anchors, offsets, args.rows, args.cols, args.spacing,
                args.radial_wave, args.clip_wave, cur_z_step,
                args.anchor_offset_x, args.anchor_offset_x_minscale,
                args.anchor_offset_x_maxscale)
        elif args.anchor_noise:
            anchors = plat.sampling.anchors_noise_offsets(
                anchors, offsets, args.rows, args.cols, args.spacing,
                cur_z_step, args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)
        elif range_data is not None:
            anchors = plat.sampling.anchors_json_offsets(
                anchors, offsets, args.rows, args.cols, args.spacing,
                cur_z_step, args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale,
                range_data)
        else:
            anchors = plat.sampling.anchors_from_offsets(
                anchors[0], offsets, args.anchor_offset_x,
                args.anchor_offset_y, args.anchor_offset_x_minscale,
                args.anchor_offset_y_minscale, args.anchor_offset_x_maxscale,
                args.anchor_offset_y_maxscale)

    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        if args.global_ramp:
            offsets = cur_z_step * offsets
        global_offset = plat.sampling.get_global_offset(
            offsets, args.global_indices, args.global_scale)

    z_dim = dmodel.get_zdim()
    # I don't remember what partway/encircle do so they are not handling the chain layout
    # this handles the case (at least) of mines with random anchors
    if (args.partway is not None) or args.encircle or (anchors is None):
        srows = ((args.rows // args.spacing) + 1)
        scols = ((args.cols // args.spacing) + 1)
        rand_anchors = plat.sampling.generate_latent_grid(
            z_dim,
            rows=srows,
            cols=scols,
            fan=False,
            gradient=False,
            spherical=False,
            gaussian=False,
            anchors=None,
            anchor_images=None,
            mine=False,
            chain=False,
            spacing=args.spacing,
            analogy=False,
            rand_uniform=args.uniform)
        if args.partway is not None:
            num_rand = len(rand_anchors)
            clipped_anchors = anchors[:num_rand]
            anchors = ((1.0 - args.partway) * rand_anchors +
                       args.partway * clipped_anchors)
        elif args.encircle:
            anchors = surround_anchors(srows, scols, anchors, rand_anchors)
        else:
            anchors = rand_anchors
    z = plat.sampling.generate_latent_grid(z_dim, args.rows, args.cols,
                                           args.fan, args.gradient,
                                           not args.linear, args.gaussian,
                                           anchors, anchor_images, True,
                                           args.chain, args.spacing,
                                           args.analogy)
    if global_offset is not None:
        z = z + global_offset

    template_dict["BASENAME"] = cur_basename
    # emb_l = None
    # emb_l = [None] * len(z)
    if args.clone_label is not None:
        emb_l = np.tile(embedded[args.clone_label], [len(z), 1])
    else:
        emb_l = plat.sampling.generate_latent_grid(
            z_dim, args.rows, args.cols, args.fan, args.gradient,
            not args.linear, args.gaussian, embedded, anchor_images, True,
            args.chain, args.spacing, args.analogy)

    #TODO - maybe not best way to check if labels are valid
    # if anchor_labels is None or anchor_labels[0] is None:
    #     emb_l = [None] * len(z)
    plat.sampling.grid_from_latents(z,
                                    dmodel,
                                    args.rows,
                                    args.cols,
                                    anchor_images,
                                    args.tight,
                                    args.shoulders,
                                    cur_save_path,
                                    args,
                                    args.batch_size,
                                    template_dict=template_dict,
                                    emb_l=emb_l)
    return dmodel
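The try/except AttributeError around dmodel.embed_labels above is a duck-typing fallback: models without label embeddings simply contribute None placeholders. The same idea as a standalone sketch (embed_or_none is a hypothetical name):

def embed_or_none(model, labels):
    # use the model's label embedding if it has one, else None placeholders
    try:
        return model.embed_labels(labels)
    except AttributeError:
        return [None] * len(labels)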
Example #7
def canvas(parser, context, args):
    parser = argparse.ArgumentParser(description="Plot model samples")
    parser.add_argument("--model",
                        dest='model',
                        type=str,
                        default=None,
                        help="name of model in plat zoo")
    parser.add_argument("--model-file",
                        dest='model_file',
                        type=str,
                        default=None,
                        help="path to the saved model")
    parser.add_argument(
        "--model-type",
        dest='model_type',
        type=str,
        default=None,
        help="the type of model (usually inferred from filename)")
    parser.add_argument(
        "--model-interface",
        dest='model_interface',
        type=str,
        default=None,
        help="class interface for model (usually inferred from model-type)")
    parser.add_argument("--width",
                        type=int,
                        default=512,
                        help="width of canvas to render in pixels")
    parser.add_argument("--height",
                        type=int,
                        default=512,
                        help="height of canvas to render in pixels")
    parser.add_argument("--rows",
                        type=int,
                        default=3,
                        help="number of rows of anchors")
    parser.add_argument("--cols",
                        type=int,
                        default=3,
                        help="number of columns of anchors")
    parser.add_argument("--xmin",
                        type=int,
                        default=0,
                        help="min x in virtual space")
    parser.add_argument("--xmax",
                        type=int,
                        default=100,
                        help="max x in virtual space")
    parser.add_argument("--ymin",
                        type=int,
                        default=0,
                        help="min y in virtual space")
    parser.add_argument("--ymax",
                        type=int,
                        default=100,
                        help="max y in virtual space")
    parser.add_argument("--outfile",
                        dest='save_path',
                        type=str,
                        default="canvas_%DATE%_%MODEL%_%SEQ%.png",
                        help="where to save the generated samples")
    parser.add_argument("--seed",
                        type=int,
                        default=None,
                        help="Optional random seed")
    parser.add_argument('--do-check-bounds',
                        dest='do_check_bounds',
                        default=False,
                        action='store_true',
                        help="clip to drawing bounds")
    parser.add_argument('--anchor-image',
                        dest='anchor_image',
                        default=None,
                        help="use image as source of anchors")
    parser.add_argument('--anchor-mine',
                        dest='anchor_mine',
                        default=None,
                        help="use image as single source of mine coordinates")
    parser.add_argument(
        '--random-mine',
        dest='random_mine',
        default=False,
        action='store_true',
        help="use random sampling as source of mine coordinates")
    parser.add_argument('--additive',
                        dest='additive',
                        default=False,
                        action='store_true',
                        help="use additive compositing")
    parser.add_argument(
        '--mask-name',
        dest='mask_name',
        default=None,
        help="prefix name for alpha mask to use (full/rounded/hex")
    parser.add_argument('--mask-layout',
                        dest='mask_layout',
                        default=None,
                        help="use image as source of mine grid points")
    parser.add_argument('--mask-scale',
                        dest='mask_scale',
                        default=1.0,
                        type=float,
                        help="Scale mask layout (squeeze)")
    parser.add_argument('--mask-width',
                        dest='mask_width',
                        type=int,
                        default=15,
                        help="width for computed mask")
    parser.add_argument('--mask-height',
                        dest='mask_height',
                        type=int,
                        default=15,
                        help="height for computed mask")
    parser.add_argument('--mask-radius',
                        dest='mask_radius',
                        default=None,
                        type=float,
                        help="radius for computed mask")
    parser.add_argument('--layout',
                        dest='layout',
                        default=None,
                        help="layout json file")
    parser.add_argument('--layout-scale',
                        dest='layout_scale',
                        default=1,
                        type=int,
                        help="Scale layout")
    parser.add_argument('--batch-size',
                        dest='batch_size',
                        type=int,
                        default=100,
                        help="number of images to decode at once")
    parser.add_argument('--passthrough',
                        dest='passthrough',
                        default=False,
                        action='store_true',
                        help="Use originals instead of reconstructions")
    parser.add_argument('--anchor-offset',
                        dest='anchor_offset',
                        default=None,
                        help="use json file as source of each anchors offsets")
    parser.add_argument('--anchor-offset-a',
                        dest='anchor_offset_a',
                        default="42",
                        type=str,
                        help="which indices to combine for offset a")
    parser.add_argument('--anchor-offset-b',
                        dest='anchor_offset_b',
                        default="31",
                        type=str,
                        help="which indices to combine for offset b")
    parser.add_argument("--image-size",
                        dest='image_size',
                        type=int,
                        default=64,
                        help="size of (offset) images")
    parser.add_argument('--global-offset',
                        dest='global_offset',
                        default=None,
                        help="use json file as source of global offsets")
    parser.add_argument('--global-indices',
                        dest='global_indices',
                        default=None,
                        type=str,
                        help="offset indices to apply globally")
    parser.add_argument('--global-scale',
                        dest='global_scale',
                        default=1.0,
                        type=float,
                        help="scaling factor for global offset")
    args = parser.parse_args(args)

    template_dict = {}
    if args.seed:
        np.random.seed(args.seed)
        random.seed(args.seed)

    global_offset = None
    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        global_offset = plat.sampling.get_global_offset(
            offsets, args.global_indices, args.global_scale)

    anchor_images = None
    if args.anchor_image is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_image,
                                                 image_size=(args.image_size,
                                                             args.image_size))
    elif args.anchor_mine is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_mine,
                                                 image_size=(args.image_size,
                                                             args.image_size))
        basename = os.path.basename(args.anchor_mine)
        template_dict["BASENAME"] = os.path.splitext(basename)[0]

    anchors = None
    if not args.passthrough:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type,
                                args.model_interface)

        workq = anchor_images[:]
        anchors_list = []
        while len(workq) > 0:
            curq = workq[:args.batch_size]
            workq = workq[args.batch_size:]
            print("Processing {} anchors".format(len(curq)))
            cur_anchors = dmodel.encode_images(curq)
            for c in cur_anchors:
                anchors_list.append(c)
        anchors = np.asarray(anchors_list)

    if anchors is None:
        anchors = np.random.normal(loc=0,
                                   scale=1,
                                   size=(args.cols * args.rows, 100))

    anchor_offsets = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        anchor_offsets = get_json_vectors(args.anchor_offset)

    canvas = Canvas(args.width, args.height, args.xmin, args.xmax, args.ymin,
                    args.ymax, args.mask_name, args.image_size,
                    args.do_check_bounds)
    workq = []

    do_hex = True

    if args.layout:
        with open(args.layout) as json_file:
            layout_data = json.load(json_file)
        xy = np.array(layout_data["xy"])
        grid_size = layout_data["size"]
        roots = layout_data["r"]
        if "s" in layout_data:
            s = layout_data["s"]
        else:
            s = None
        for i, pair in enumerate(xy):
            x = pair[0] * canvas.canvas_xmax / grid_size[0]
            y = pair[1] * canvas.canvas_ymax / grid_size[1]
            # guard: the layout may omit "s"; assume a unit cell size in that case
            si = s[i] if s is not None else 1.0
            a = (pair[0] + 0.5 * si) / float(grid_size[0])
            b = (pair[1] + 0.5 * si) / float(grid_size[1])
            r = roots[i]
            if s is None:
                scale = args.layout_scale
            else:
                scale = si * args.layout_scale
            # print("Placing {} at {}, {} because {},{} and {}, {}".format(scale, x, y, canvas.canvas_xmax, canvas.canvas_ymax, grid_size[0], grid_size[1]))
            if args.passthrough:
                output_image = anchor_images[r]
                canvas.place_image(output_image,
                                   x,
                                   y,
                                   args.additive,
                                   scale=scale)
            else:
                if args.anchor_mine is not None or args.random_mine:
                    z = create_mine_canvas(args.rows, args.cols, b, a, anchors)
                elif anchor_offsets is not None:
                    z = apply_anchor_offsets(anchors[r], anchor_offsets, a, b,
                                             args.anchor_offset_a,
                                             args.anchor_offset_b)
                else:
                    z = anchors[r]

                if global_offset is not None:
                    z = z + global_offset
                # print("Storing {},{} with {}".format(x, y, len(z)))
                workq.append({"z": z, "x": x, "y": y, "s": scale})

    elif args.mask_layout or args.mask_radius:
        if args.mask_layout:
            rawim = imread(args.mask_layout)
            if len(rawim.shape) == 2:
                im_height, im_width = rawim.shape
                mask_layout = rawim
            else:
                im_height, im_width, _ = rawim.shape
                mask_layout = rawim[:, :, 0]
        else:
            im_height, im_width = args.mask_height, args.mask_width
            mask_layout = make_mask_layout(im_height, im_width,
                                           args.mask_radius)
        for xpos in range(im_width):
            for ypos in range(im_height):
                a = float(xpos) / (im_width - 1)
                if do_hex and ypos % 2 == 0:
                    a = a + 0.5 / (im_width - 1)
                x = args.mask_scale * canvas.xmax * a
                b = float(ypos) / (im_height - 1)
                y = args.mask_scale * canvas.ymax * b
                if mask_layout[ypos][xpos] <= 128:
                    pass  # below threshold: cell is masked out
                elif args.passthrough:
                    output_image = anchor_images[0]
                    canvas.place_image(output_image, x, y, args.additive)
                else:
                    if len(anchors) == 1 or anchor_offsets is not None:
                        z = apply_anchor_offsets(anchors[0], anchor_offsets, a,
                                                 b, args.anchor_offset_a,
                                                 args.anchor_offset_b)
                    else:
                        z = create_mine_canvas(args.rows, args.cols, b, a,
                                               anchors)

                    if global_offset is not None:
                        z = z + global_offset

                    workq.append({"z": z, "x": x, "y": y, "s": 1.0})

    while (len(workq) > 0):
        curq = workq[:args.batch_size]
        workq = workq[args.batch_size:]
        latents = [e["z"] for e in curq]
        images = dmodel.sample_at(np.array(latents))
        for i in range(len(curq)):
            # print("Placing {},{} with {}".format(curq[i]["x"], curq[i]["y"], len(latents)))
            canvas.place_image(images[i],
                               curq[i]["x"],
                               curq[i]["y"],
                               args.additive,
                               scale=curq[i]["s"])
            # print("Placed")

    template_dict["SIZE"] = args.image_size
    outfile = plat.sampling.emit_filename(args.save_path, template_dict, args)
    canvas.save(outfile)
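In the mask loop above, do_hex shifts every even row by half a cell so the square mask grid samples like a hexagonal packing. The coordinate mapping in isolation:

def hex_grid_coords(xpos, ypos, im_width, im_height, do_hex=True):
    # normalized (a, b) in [0, 1]; even rows are offset half a cell for hex packing
    a = float(xpos) / (im_width - 1)
    if do_hex and ypos % 2 == 0:
        a += 0.5 / (im_width - 1)
    b = float(ypos) / (im_height - 1)
    return a, b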
Example #8
def canvas(parser, context, args):
    parser = argparse.ArgumentParser(description="Plot model samples")
    parser.add_argument("--model", dest='model', type=str, default=None,
                        help="name of model in plat zoo")
    parser.add_argument("--model-file", dest='model_file', type=str, default=None,
                        help="path to the saved model")
    parser.add_argument("--model-type", dest='model_type', type=str, default=None,
                        help="the type of model (usually inferred from filename)")
    parser.add_argument("--model-interface", dest='model_interface', type=str,
                        default=None,
                        help="class interface for model (usually inferred from model-type)")
    parser.add_argument("--width", type=int, default=512,
                        help="width of canvas to render in pixels")
    parser.add_argument("--height", type=int, default=512,
                        help="height of canvas to render in pixels")
    parser.add_argument("--rows", type=int, default=3,
                        help="number of rows of anchors")
    parser.add_argument("--cols", type=int, default=3,
                        help="number of columns of anchors")
    parser.add_argument("--xmin", type=int, default=0,
                        help="min x in virtual space")
    parser.add_argument("--xmax", type=int, default=100,
                        help="max x in virtual space")
    parser.add_argument("--ymin", type=int, default=0,
                        help="min y in virtual space")
    parser.add_argument("--ymax", type=int, default=100,
                        help="max y in virtual space")
    parser.add_argument("--outfile", dest='save_path', type=str, default="canvas_%DATE%_%MODEL%_%SEQ%.png",
                        help="where to save the generated samples")
    parser.add_argument("--seed", type=int,
                        default=None, help="Optional random seed")
    parser.add_argument('--do-check-bounds', dest='do_check_bounds', default=False, action='store_true',
                        help="clip to drawing bounds")
    parser.add_argument('--background-image', dest='background_image', default=None,
                        help="use image initial background")
    parser.add_argument('--anchor-image', dest='anchor_image', default=None,
                        help="use image as source of anchors")
    parser.add_argument('--anchor-mine', dest='anchor_mine', default=None,
                        help="use image as single source of mine coordinates")    
    parser.add_argument('--anchor-canvas', dest='anchor_canvas', default=False, action='store_true',
                        help="anchor image from canvas")
    parser.add_argument('--random-mine', dest='random_mine', default=False, action='store_true',
                        help="use random sampling as source of mine coordinates")
    parser.add_argument('--additive', dest='additive', default=False, action='store_true',
                        help="use additive compositing")
    parser.add_argument('--mask-name', dest='mask_name', default=None,
                        help="prefix name for alpha mask to use (full/rounded/hex")
    parser.add_argument('--mask-layout', dest='mask_layout', default=None,
                        help="use image as source of mine grid points")
    parser.add_argument('--mask-scale', dest='mask_scale', default=1.0, type=float,
                        help="Scale mask layout (squeeze)")
    parser.add_argument('--mask-width', dest='mask_width', type=int, default=15,
                        help="width for computed mask")
    parser.add_argument('--mask-height', dest='mask_height', type=int, default=15,
                        help="height for computed mask")
    parser.add_argument('--mask-radius', dest='mask_radius', default=None, type=float,
                        help="radius for computed mask")
    parser.add_argument('--layout', dest='layout', default=None,
                        help="layout json file")
    parser.add_argument('--layout-scale', dest='layout_scale', default=1, type=int,
                        help="Scale layout")
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=100,
                        help="number of images to decode at once")
    parser.add_argument('--passthrough', dest='passthrough', default=False, action='store_true',
                        help="Use originals instead of reconstructions")
    parser.add_argument('--anchor-offset', dest='anchor_offset', default=None,
                        help="use json file as source of each anchors offsets")
    parser.add_argument('--anchor-offset-a', dest='anchor_offset_a', default="42", type=str,
                        help="which indices to combine for offset a")
    parser.add_argument('--anchor-offset-b', dest='anchor_offset_b', default="31", type=str,
                        help="which indices to combine for offset b")
    parser.add_argument("--image-size", dest='image_size', type=int, default=64,
                        help="size of (offset) images")
    parser.add_argument('--global-offset', dest='global_offset', default=None,
                        help="use json file as source of global offsets")
    parser.add_argument('--global-indices', dest='global_indices', default=None, type=str,
                        help="offset indices to apply globally")
    parser.add_argument('--global-scale', dest='global_scale', default=1.0, type=float,
                        help="scaling factor for global offset")
    args = parser.parse_args(args)

    template_dict = {}
    if args.seed:
        np.random.seed(args.seed)
        random.seed(args.seed)

    global_offset = None
    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        global_offset = plat.sampling.get_global_offset(offsets, args.global_indices, args.global_scale)

    anchor_images = None
    if args.anchor_image is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_image, image_size=(args.image_size, args.image_size))
    elif args.anchor_mine is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_mine, image_size=(args.image_size, args.image_size))
        basename = os.path.basename(args.anchor_mine)
        template_dict["BASENAME"] = os.path.splitext(basename)[0]

    anchors = None
    if not args.passthrough:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type, args.model_interface)

        workq = anchor_images[:]
        anchors_list = []
        while len(workq) > 0:
            curq = workq[:args.batch_size]
            workq = workq[args.batch_size:]
            print("Processing {} anchors".format(len(curq)))
            cur_anchors = dmodel.encode_images(curq)
            for c in cur_anchors:
                anchors_list.append(c)
        anchors = np.asarray(anchors_list)

    if anchors is None:
        anchors = np.random.normal(loc=0, scale=1, size=(args.cols * args.rows, 100))

    anchor_offsets = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        anchor_offsets = get_json_vectors(args.anchor_offset)

    canvas = Canvas(args.width, args.height, args.xmin, args.xmax, args.ymin, args.ymax, args.mask_name, args.image_size, args.do_check_bounds)
    if args.background_image is not None:
        canvas.set_background(args.background_image)
    workq = []

    do_hex = True

    if args.layout:
        with open(args.layout) as json_file:
            layout_data = json.load(json_file)
        xy = np.array(layout_data["xy"])
        grid_size = layout_data["size"]
        roots = layout_data["r"]
        if "s" in layout_data:
            s = layout_data["s"]
        else:
            s = None
        for i, pair in enumerate(xy):
            x = pair[0] * canvas.canvas_xmax / grid_size[0]
            y = pair[1] * canvas.canvas_ymax / grid_size[1]
            # guard: the layout may omit "s"; assume a unit cell size in that case
            si = s[i] if s is not None else 1.0
            a = (pair[0] + 0.5 * si) / float(grid_size[0])
            b = (pair[1] + 0.5 * si) / float(grid_size[1])
            r = roots[i]
            if s is None:
                scale = args.layout_scale
            else:
                scale = si * args.layout_scale
            # print("Placing {} at {}, {} because {},{} and {}, {}".format(scale, x, y, canvas.canvas_xmax, canvas.canvas_ymax, grid_size[0], grid_size[1]))
            if args.passthrough:
                output_image = anchor_images[r]
                canvas.place_image(output_image, x, y, args.additive, scale=scale)
            else:
                if args.anchor_mine is not None or args.random_mine:
                    z = create_mine_canvas(args.rows, args.cols, b, a, anchors)
                elif anchor_offsets is not None:
                    z = apply_anchor_offsets(anchors[r], anchor_offsets, a, b, args.anchor_offset_a, args.anchor_offset_b)
                else:
                    z = anchors[r]

                if global_offset is not None:
                    z = z + global_offset
                # print("Storing {},{} with {}".format(x, y, len(z)))
                workq.append({
                        "z": z,
                        "x": x,
                        "y": y,
                        "s": scale
                    })

    elif args.mask_layout or args.mask_radius:
        if args.mask_layout:
            rawim = imread(args.mask_layout)
            if len(rawim.shape) == 2:
                im_height, im_width = rawim.shape
                mask_layout = rawim
            else:
                im_height, im_width, _ = rawim.shape
                mask_layout = rawim[:,:,0]
        else:
            im_height, im_width = args.mask_height, args.mask_width
            mask_layout = make_mask_layout(im_height, im_width, args.mask_radius)
        for xpos in range(im_width):
            for ypos in range(im_height):
                a = float(xpos) / (im_width - 1)
                if do_hex and ypos % 2 == 0:
                    a = a + 0.5 / (im_width - 1)
                x = args.mask_scale * canvas.xmax * a
                b = float(ypos) / (im_height - 1)
                y = args.mask_scale * canvas.ymax * b
                if mask_layout[ypos][xpos] <= 128:
                    continue
                if args.passthrough:
                    if args.anchor_canvas:
                        cur_anchor_image = canvas.get_anchor(x, y, args.image_size)
                    else:
                        cur_anchor_image = anchor_images[0]
                    canvas.place_image(cur_anchor_image, x, y, args.additive, None)
                else:
                    if args.anchor_canvas:
                        cur_anchor_image = canvas.get_anchor(x, y, args.image_size)
                        zs = dmodel.encode_images([cur_anchor_image])
                        z = zs[0]
                    elif len(anchors) == 1 or anchor_offsets is not None:
                        z = apply_anchor_offsets(anchors[0], anchor_offsets, a, b, args.anchor_offset_a, args.anchor_offset_b)
                    else:
                        z = create_mine_canvas(args.rows, args.cols, b, a, anchors)

                    if global_offset is not None:
                        z = z + global_offset

                    workq.append({
                            "z": z,
                            "x": x,
                            "y": y,
                            "s": None
                        })

    while len(workq) > 0:
        curq = workq[:args.batch_size]
        workq = workq[args.batch_size:]
        latents = [e["z"] for e in curq]
        images = dmodel.sample_at(np.array(latents))
        for i in range(len(curq)):
            # print("Placing {},{} with {}".format(curq[i]["x"], curq[i]["y"], len(latents)))
            canvas.place_image(images[i], curq[i]["x"], curq[i]["y"], args.additive, scale=curq[i]["s"])
            # print("Placed")

    template_dict["SIZE"] = args.image_size
    outfile = plat.sampling.emit_filename(args.save_path, template_dict, args)
    canvas.save(outfile)
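
A note on the --layout input above: the file is plain JSON, and the loader reads the keys xy, size, r, and (optionally) s. A minimal sketch of a matching file, written from Python; all values here are illustrative assumptions, not taken from a real layout:

import json

# hypothetical layout: three cells on an 8x8 grid, each rooted at a
# different anchor, with optional per-cell scales
layout = {
    "xy": [[0, 0], [2, 1], [4, 2]],  # cell coordinates on the grid
    "size": [8, 8],                  # grid dimensions used to normalize x/y
    "r": [0, 1, 2],                  # root anchor index for each cell
    "s": [1.0, 1.5, 2.0],            # optional per-cell placement/tile scale
}
with open("layout.json", "w") as f:
    json.dump(layout, f)
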
Example #9
0
def run_with_args(args, dmodel, cur_anchor_image, cur_save_path, cur_z_step, cur_basename="basename", range_data=None, template_dict=None):
    # avoid the shared-mutable-default pitfall: a dict default would leak
    # entries between calls
    if template_dict is None:
        template_dict = {}
    anchor_images = None
    anchor_labels = None
    if args.anchors:
        allowed = None
        prohibited = None
        include_targets = False
        if args.allowed:
            include_targets = True
            allowed = list(map(int, args.allowed.split(",")))
        if args.prohibited:
            include_targets = True
            prohibited = list(map(int, args.prohibited.split(",")))
        anchor_images = get_anchor_images(args.dataset, args.split, args.offset, args.stepsize, args.numanchors, allowed, prohibited, args.image_size, args.color_convert, include_targets=include_targets)
        if args.with_labels:
            anchor_labels = get_anchor_labels(args.dataset, args.split, args.offset, args.stepsize, args.numanchors)

    if args.anchor_glob is not None:
        files = plat.sampling.real_glob(args.anchor_glob)
        if args.offset > 0:
            files = files[args.offset:]
        if args.stepsize > 1:
            files = files[::args.stepsize]
        if args.numanchors is not None:
            files = files[:args.numanchors]
        anchor_images = anchors_from_filelist(files, args.channels)
        print("Read {} images from {} files".format(len(anchor_images), len(files)))
        print("First 5 files: ", files[:5])
        if len(anchor_images) == 0:
            print("No images, cannot contine")
            sys.exit(0)

    if cur_anchor_image is not None:
        # _, _, anchor_images = anchors_from_image(cur_anchor_image, channels=args.channels, image_size=(args.image_size, args.image_size))
        anchor_images = anchors_from_filelist([cur_anchor_image], channels=args.channels)
        if args.offset > 0:
            anchor_images = anchor_images[args.offset:]
        if args.stepsize > 0:
            anchor_images = anchor_images[::args.stepsize]
        if args.numanchors is not None:
            anchor_images = anchor_images[:args.numanchors]

    # at this point we can make dummy anchor_labels if needed
    if anchor_images is not None and anchor_labels is None:
        anchor_labels = [None] * len(anchor_images)

    if args.passthrough:
        # determine final filename string
        save_path = plat.sampling.emit_filename(cur_save_path, {}, args)
        print("Preparing image file {}".format(save_path))
        img = grid2img(anchor_images, args.rows, args.cols, not args.tight)
        img.save(save_path)
        sys.exit(0)

    if dmodel is None:
        dmodel = zoo.load_model(args.model, args.model_file, args.model_type, args.model_interface)

    if args.seed is not None:
        print("Setting random seed to ", args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)
    else:
        np.random.seed(None)
        random.seed(None)

    embedded = None
    if anchor_images is not None:
        x_queue = anchor_images[:]
        c_queue = anchor_labels[:]
        anchors = None
        # print("========> ENCODING {} at a time".format(args.batch_size))
        while len(x_queue) > 0:
            cur_x = x_queue[:args.batch_size]
            cur_c = c_queue[:args.batch_size]
            x_queue = x_queue[args.batch_size:]
            c_queue = c_queue[args.batch_size:]
            # TODO: remove vestiges of conditional encode/decode
            # encoded = dmodel.encode_images(cur_x, cur_c)
            encoded = dmodel.encode_images(cur_x)
            try:
                emb_l = dmodel.embed_labels(cur_c)
            except AttributeError:
                emb_l = [None] * len(cur_x)
            if anchors is None:
                anchors = encoded
                embedded = emb_l
            else:
                anchors = np.concatenate((anchors, encoded), axis=0)
                embedded = np.concatenate((embedded, emb_l), axis=0)

        # anchors = dmodel.encode_images(anchor_images)
    elif args.anchor_vectors is not None:
        anchors = get_json_vectors(args.anchor_vectors)
        # print("Read vectors: ", anchors.shape)
        vsize = anchors.shape[-1]
        anchors = anchors.reshape([-1, vsize])
        print("Read vectors: ", anchors.shape)
    else:
        anchors = None

    if args.invert_anchors:
        anchors = -1 * anchors

    if args.encoder:
        if anchors is not None:
            plat.sampling.output_vectors(anchors, args.save_path)
        else:
            plat.sampling.stream_output_vectors(dmodel, args.dataset, args.split, args.save_path, batch_size=args.batch_size)
        sys.exit(0)

    global_offset = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        offsets = get_json_vectors_list(args.anchor_offset)
        if args.anchor_wave:
            anchors = plat.sampling.anchors_wave_offsets(anchors, offsets, args.rows, args.cols, args.spacing,
                args.radial_wave, args.clip_wave, cur_z_step, args.anchor_offset_x,
                args.anchor_offset_x_minscale, args.anchor_offset_x_maxscale)
        elif args.anchor_noise:
            anchors = plat.sampling.anchors_noise_offsets(anchors, offsets, args.rows, args.cols, args.spacing,
                cur_z_step, args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale, args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)
        elif range_data is not None:
            anchors = plat.sampling.anchors_json_offsets(anchors, offsets, args.rows, args.cols, args.spacing,
                cur_z_step, args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale, args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale,
                range_data)
        else:
            anchors = plat.sampling.anchors_from_offsets(anchors[0], offsets, args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale, args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)

    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        if args.global_ramp:
            offsets = cur_z_step * offsets
        global_offset = plat.sampling.get_global_offset(offsets, args.global_indices, args.global_scale)

    z_dim = dmodel.get_zdim()
    # I don't remember what partway/encircle do, so they do not handle the chain layout
    # this handles the case (at least) of mines with random anchors
    if (args.partway is not None) or args.encircle or (anchors is None):
        srows = (args.rows // args.spacing) + 1
        scols = (args.cols // args.spacing) + 1
        rand_anchors = plat.sampling.generate_latent_grid(z_dim, rows=srows, cols=scols, fan=False, gradient=False,
            spherical=False, gaussian=False, anchors=None, anchor_images=None, mine=False, chain=False,
            spacing=args.spacing, analogy=False, rand_uniform=args.uniform)
        if args.partway is not None:
            l = len(rand_anchors)
            clipped_anchors = anchors[:l]
            anchors = (1.0 - args.partway) * rand_anchors + args.partway * clipped_anchors
        elif args.encircle:
            anchors = surround_anchors(srows, scols, anchors, rand_anchors)
        else:
            anchors = rand_anchors
    z = plat.sampling.generate_latent_grid(z_dim, args.rows, args.cols, args.fan, args.gradient, not args.linear, args.gaussian,
            anchors, anchor_images, True, args.chain, args.spacing, args.analogy)
    if args.write_anchors:
        plat.sampling.output_vectors(anchors, "anchors.json")

    if global_offset is not None:
        z = z + global_offset

    template_dict["BASENAME"] = cur_basename
    # emb_l = None
    # emb_l = [None] * len(z)
    embedded_labels = None
    # TODO: this could be more elegant
    if embedded is not None and embedded[0] is not None:
        if args.clone_label is not None:
            embedded_labels = np.tile(embedded[args.clone_label], [len(z), 1])
        else:
            embedded_labels = plat.sampling.generate_latent_grid(z_dim, args.rows, args.cols, args.fan, args.gradient, not args.linear, args.gaussian,
                    embedded, anchor_images, True, args.chain, args.spacing, args.analogy)

    # TODO: maybe not the best way to check if labels are valid
    # if anchor_labels is None or anchor_labels[0] is None:
    #     emb_l = [None] * len(z)
    plat.sampling.grid_from_latents(z, dmodel, args.rows, args.cols, anchor_images, args.tight, args.shoulders, cur_save_path, args, args.batch_size, template_dict=template_dict, emb_l=embedded_labels)
    return dmodel
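
The encode loop above is the batching pattern these examples use throughout: slice a fixed-size batch off the queue, encode it, and concatenate the results. A standalone sketch of the same idea, assuming only a model object that exposes encode_images(list) returning an ndarray:

import numpy as np

def encode_in_batches(model, images, batch_size):
    # encode in fixed-size slices so arbitrarily many images fit in memory,
    # then stack the per-batch encodings into one array
    batches = [images[i:i + batch_size]
               for i in range(0, len(images), batch_size)]
    return np.concatenate([model.encode_images(b) for b in batches], axis=0)
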
Example #10
0
def atvec(parser, context, args):
    parser.add_argument('--dataset', dest='dataset', default=None,
                        help="Source dataset (for labels).")
    # memo: --labels became --attributes when --classes was added
    parser.add_argument('--attributes', dest='attributes', default=None,
                        help="Text file with 0/1 labels.")
    parser.add_argument('--classes', dest='classes', default=None,
                        help="Text file with 0/1/2/.../num-classes-1 labels.")
    parser.add_argument('--split', dest='split', default="train",
                        help="Which split to use from the dataset (train/nontrain/valid/test/any).")
    parser.add_argument("--num-attribs", dest='num_attribs', type=int, default=40,
                        help="Number of attributes (labes)")
    parser.add_argument("--which-attribs", type=str, default=None,
                        help="optional comma separated list of attributes to run")
    parser.add_argument("--num-classes", dest='num_classes', type=int, default=None,
                        help="For multiclass, number of classes (assumed 0 .. n-1)")
    parser.add_argument("--z-dim", dest='z_dim', type=int, default=100,
                        help="z dimension of vectors")
    parser.add_argument("--encoded-vectors", type=str, default=None,
                        help="Comma separated list of json arrays")
    parser.add_argument("--encoded-true", type=str, default=None,
                        help="Comma separated list of json arrays (true)")
    parser.add_argument("--encoded-false", type=str, default=None,
                        help="Comma separated list of json arrays (false)")
    parser.add_argument('--thresh', dest='thresh', default=False, action='store_true',
                        help="Compute thresholds for attribute vectors classifiers")
    parser.add_argument('--svm', dest='svm', default=False, action='store_true',
                        help="Use SVM for computing attribute vectors")
    parser.add_argument("--limit", dest='limit', type=int, default=None,
                        help="Limit number of inputs when computing atvecs")
    parser.add_argument('--roc', dest='roc', default=False, action='store_true',
                        help="ROC curve of selected attribute vectors")
    parser.add_argument("--attribute-vectors", dest='attribute_vectors', default=None,
                        help="use json file as source of attribute vectors")
    parser.add_argument("--attribute-thresholds", dest='attribute_thresholds', default=None,
                        help="use these non-zero values for binary classifier thresholds")
    parser.add_argument("--attribute-set", dest='attribute_set', default="all",
                        help="score ROC/accuracy against true/false/all")
    parser.add_argument('--attribute-indices', dest='attribute_indices', default=None, type=str,
                        help="indices to select specific attribute vectors")
    parser.add_argument("--balanced2", dest='balanced2', type=str, default=None,
                        help="Balanced two attributes and generate atvec. eg: 20,31")
    parser.add_argument("--balanced", dest='balanced', type=str, default=None,
                        help="Balance attributes and generate atvec. eg: 20,21,31")
    parser.add_argument("--avg-diff", dest='avg_diff', type=str, default=None,
                        help="Two lists of vectors to average and then diff")
    parser.add_argument("--svm-diff", dest='svm_diff', type=str, default=None,
                        help="Two lists of vectors to average and then svm diff")
    parser.add_argument('--outfile', dest='outfile', default=None,
                        help="Output json file for vectors.")
    args = parser.parse_args(args)

    if args.avg_diff:
        vecs1, vecs2 = args.avg_diff.split(",")
        encoded1 = json_list_to_array(vecs1)
        encoded2 = json_list_to_array(vecs2)
        print("Taking the difference between {} and {} vectors".format(len(encoded1), len(encoded2)))
        m1 = np.mean(encoded1, axis=0)
        m2 = np.mean(encoded2, axis=0)
        atvec = m2 - m1
        z_dim, = atvec.shape
        atvecs = atvec.reshape(1, z_dim)
        print("Computed diff shape: {}".format(atvecs.shape))
        if args.outfile is not None:
            save_json_attribs(atvecs, args.outfile)
        sys.exit(0)

    if args.svm_diff:
        vecs1, vecs2 = args.svm_diff.split(",")
        encoded1 = json_list_to_array(vecs1)
        encoded2 = json_list_to_array(vecs2)
        print("Taking the svm difference between {} and {} vectors".format(len(encoded1), len(encoded2)))
        C = 1.0  # SVM regularization parameter
        X_arr = []
        y_arr = []
        for l in range(len(encoded1)):
            X_arr.append(encoded1[l])
            y_arr.append(False)
        for l in range(len(encoded2)):
            X_arr.append(encoded2[l])
            y_arr.append(True)
        X = np.array(X_arr)
        y = np.array(y_arr)
        # svc = svm.LinearSVC(C=C, class_weight="balanced").fit(X, y)
        svc = svm.LinearSVC(C=C).fit(X, y)
        # get the separating hyperplane
        w = svc.coef_[0]

        # FIXME: this is a scaling hack: rescale the SVM normal so its length
        # matches that of the mean-difference vector
        m1 = np.mean(encoded1, axis=0)
        m2 = np.mean(encoded2, axis=0)
        mean_vector = m1 - m2
        mean_length = np.linalg.norm(mean_vector)
        svm_length = np.linalg.norm(w)

        atvec = (mean_length / svm_length) * w
        z_dim, = atvec.shape
        atvecs = atvec.reshape(1, z_dim)
        print("Computed svm diff shape: {}".format(atvecs.shape))
        if args.outfile is not None:
            save_json_attribs(atvecs, args.outfile)
        sys.exit(0)

    print("reading encoded vectors...")
    attribs = None
    if args.encoded_vectors is not None:
        if args.encoded_vectors.endswith("json"):
            encoded = json_list_to_array(args.encoded_vectors)
            print("Read json array: {}".format(encoded.shape))
        else:
            encoded = np.load(args.encoded_vectors)['arr_0']
            print("Read numpy array: {}".format(encoded.shape))
    else:
        if args.encoded_true.endswith("json"):
            encoded_true = json_list_to_array(args.encoded_true)
            print("Read true json array: {}".format(encoded_true.shape))
        else:
            encoded_true = np.load(args.encoded_true)['arr_0']
            print("Read true numpy array: {}".format(encoded_true.shape))
        if args.encoded_false.endswith("json"):
            encoded_false = json_list_to_array(args.encoded_false)
            print("Read false json array: {}".format(encoded_false.shape))
        else:
            encoded_false = np.load(args.encoded_false)['arr_0']
            print("Read false numpy array: {}".format(encoded_false.shape))
        encoded = np.concatenate((encoded_true, encoded_false), axis=0)
        num_true = len(encoded_true)
        num_false = len(encoded_false)
        true_values = np.ones(shape=[num_true, 1, 1], dtype=int)
        false_values = np.zeros(shape=[num_false, 1, 1], dtype=int)
        attribs = np.concatenate((true_values, false_values), axis=0)

    if args.limit is not None:
        encoded = encoded[:args.limit]
    num_rows, z_dim = encoded.shape
    if attribs is None:
        print("reading attributes...")
        if args.dataset:
            attribs = np.array(list(get_dataset_iterator(args.dataset, args.split, include_features=False, include_targets=True)))
            print("Read attributes from dataset: {}".format(attribs.shape))
        elif args.attributes is not None:
            print("Read attributes from file: {}".format(args.attributes))
            attribs = get_attribs_from_files(args.attributes)
        elif args.classes is not None:
            print("Read attributes from file: {}".format(args.classes))
            attribs = get_attribs_from_class_file(args.classes, args.num_classes)
        else:
            print("Don't know how to get labels: try --attributes or --classes")
            sys.exit(1)

    if args.which_attribs is not None:
        attribs = filter_attributes(attribs, args.which_attribs)
    print("encoded vectors: {}, attributes: {} ".format(encoded.shape, attribs.shape))

    if args.roc:
        atvecs = get_json_vectors(args.attribute_vectors)
        dim = len(atvecs[0])
        chosen_vector = offset_from_string(args.attribute_indices, atvecs, dim)
        if args.attribute_thresholds is not None:
            atvec_thresholds = get_json_vectors(args.attribute_thresholds)
            threshold = atvec_thresholds[0][int(args.attribute_indices)]
        else:
            threshold = None
        do_roc(chosen_vector, encoded, attribs, int(args.attribute_indices), threshold, args.attribute_set, args.outfile, isclass=False)
        # do_roc(chosen_vector, encoded, attribs, int(args.attribute_indices), threshold, args.attribute_set, args.outfile, isclass=(args.num_classes is not None))
        sys.exit(0)

    if args.thresh:
        atvecs = get_json_vectors(args.attribute_vectors)
        do_thresh(atvecs, encoded, attribs, args.outfile, isclass=(args.num_classes is not None))
        sys.exit(0)

    if args.balanced2:
        indexes = list(map(int, args.balanced2.split(",")))
        with_attr, without_attr = get_balanced_averages2(attribs, encoded, indexes[0], indexes[1])
        num_attribs = 2
    elif args.balanced:
        indexes = list(map(int, args.balanced.split(",")))
        with_attr, without_attr = get_balanced_averages(attribs, encoded, indexes)
        num_attribs = len(indexes)
    # I can't remember why
    # elif args.num_classes is not None:
    #     with_attr, without_attr = get_class_averages(attribs, encoded, args.num_classes);
    #     num_attribs = args.num_classes
    elif args.num_attribs is not None:
        with_attr, without_attr = get_averages(attribs, encoded)
        num_attribs = args.num_attribs
    else:
        print("Need either --num-classes or --num-attribs to compute attribute vectors")
        sys.exit(1)

    if args.svm:
        atvects = averages_to_svm_attribute_vectors(with_attr, without_attr)
    else:
        atvects = averages_to_attribute_vectors(with_attr, without_attr)
    print("Computed atvecs shape: {}".format(atvects.shape))

    if args.outfile is not None:
        save_json_attribs(atvects, args.outfile)
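
Once written with --outfile, an attribute vector is applied back in latent space as a plain offset. A minimal sketch, assuming z is one encoded latent and atvecs is the array save_json_attribs wrote; the 0.8 scale is an illustrative choice, not a fixed constant:

# push a latent along (or against) the attribute direction; decoding the
# results with dmodel.sample_at renders the attribute added or removed
z_plus = z + 0.8 * atvecs[0]
z_minus = z - 0.8 * atvecs[0]
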
Example #11
0
def main(cliargs):
    parser = argparse.ArgumentParser(description="Plot model samples")
    parser.add_argument("--interface",
                        dest='model_class',
                        type=str,
                        default="plat.interface.discgen.DiscGenModel",
                        help="class encapsulating model")
    parser.add_argument("--model",
                        dest='model',
                        type=str,
                        default=None,
                        help="path to the saved model")
    parser.add_argument("--width",
                        type=int,
                        default=512,
                        help="width of canvas to render in pixels")
    parser.add_argument("--height",
                        type=int,
                        default=512,
                        help="height of canvas to render in pixels")
    parser.add_argument("--rows",
                        type=int,
                        default=3,
                        help="number of rows of anchors")
    parser.add_argument("--cols",
                        type=int,
                        default=3,
                        help="number of columns of anchors")
    parser.add_argument("--xmin",
                        type=int,
                        default=0,
                        help="min x in virtual space")
    parser.add_argument("--xmax",
                        type=int,
                        default=100,
                        help="max x in virtual space")
    parser.add_argument("--ymin",
                        type=int,
                        default=0,
                        help="min y in virtual space")
    parser.add_argument("--ymax",
                        type=int,
                        default=100,
                        help="max y in virtual space")
    parser.add_argument("--save-path",
                        type=str,
                        default="out.png",
                        help="where to save the generated samples")
    parser.add_argument("--seed",
                        type=int,
                        default=None,
                        help="Optional random seed")
    parser.add_argument('--anchor-image',
                        dest='anchor_image',
                        default=None,
                        help="use image as source of anchors")
    parser.add_argument('--anchor-mine',
                        dest='anchor_mine',
                        default=None,
                        help="use image as single source of mine coordinates")
    parser.add_argument(
        '--random-mine',
        dest='random_mine',
        default=False,
        action='store_true',
        help="use random sampling as source of mine coordinates")
    parser.add_argument('--additive',
                        dest='additive',
                        default=False,
                        action='store_true',
                        help="use additive compositing")
    parser.add_argument(
        '--mask-name',
        dest='mask_name',
        default="rounded",
        help="prefix name for alpha mask to use (full/rounded/hex")
    parser.add_argument('--mask-layout',
                        dest='mask_layout',
                        default=None,
                        help="use image as source of mine grid points")
    parser.add_argument('--mask-scale',
                        dest='mask_scale',
                        default=1.0,
                        type=float,
                        help="Scale mask layout (squeeze)")
    parser.add_argument('--mask-width',
                        dest='mask_width',
                        type=int,
                        default=15,
                        help="width for computed mask")
    parser.add_argument('--mask-height',
                        dest='mask_height',
                        type=int,
                        default=15,
                        help="height for computed mask")
    parser.add_argument('--mask-radius',
                        dest='mask_radius',
                        default=None,
                        type=float,
                        help="radius for computed mask")
    parser.add_argument('--layout',
                        dest='layout',
                        default=None,
                        help="layout json file")
    parser.add_argument('--batch-size',
                        dest='batch_size',
                        type=int,
                        default=100,
                        help="number of images to decode at once")
    parser.add_argument('--passthrough',
                        dest='passthrough',
                        default=False,
                        action='store_true',
                        help="Use originals instead of reconstructions")
    parser.add_argument('--anchor-offset',
                        dest='anchor_offset',
                        default=None,
                        help="use json file as source of each anchors offsets")
    parser.add_argument('--anchor-offset-a',
                        dest='anchor_offset_a',
                        default="42",
                        type=str,
                        help="which indices to combine for offset a")
    parser.add_argument('--anchor-offset-b',
                        dest='anchor_offset_b',
                        default="31",
                        type=str,
                        help="which indices to combine for offset b")
    parser.add_argument("--image-size",
                        dest='image_size',
                        type=int,
                        default=64,
                        help="size of (offset) images")
    args = parser.parse_args(cliargs)

    if args.seed is not None:
        np.random.seed(args.seed)
        random.seed(args.seed)

    anchor_images = None
    if args.anchor_image is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_image,
                                                 image_size=(args.image_size,
                                                             args.image_size))
    elif args.anchor_mine is not None:
        _, _, anchor_images = anchors_from_image(args.anchor_mine,
                                                 image_size=(args.image_size,
                                                             args.image_size))

    anchors = None
    if not args.passthrough:
        model_class_parts = args.model_class.split(".")
        model_class_name = model_class_parts[-1]
        model_module_name = ".".join(model_class_parts[:-1])
        print("Loading {} interface from {}".format(model_class_name,
                                                    model_module_name))
        ModelClass = getattr(importlib.import_module(model_module_name),
                             model_class_name)
        print("Loading model from {}".format(args.model))
        dmodel = ModelClass(filename=args.model)

        if anchor_images is not None:
            anchors = dmodel.encode_images(anchor_images)

    if anchors is None:
        anchors = np.random.normal(loc=0,
                                   scale=1,
                                   size=(args.cols * args.rows, 100))

    anchor_offsets = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        anchor_offsets = get_json_vectors(args.anchor_offset)

    canvas = Canvas(args.width, args.height, args.xmin, args.xmax, args.ymin,
                    args.ymax, args.mask_name, args.image_size)
    workq = []

    do_hex = True  # shift alternate rows by half a cell for a hex-packed grid

    if args.layout:
        with open(args.layout) as json_file:
            layout_data = json.load(json_file)
        xy = np.array(layout_data["xy"])
        roots = layout_data["r"]
        for i, pair in enumerate(xy):
            x = pair[0] * canvas.xmax
            y = pair[1] * canvas.ymax
            a = pair[0]
            b = pair[1]
            r = roots[i]
            if args.passthrough:
                output_image = anchor_images[r]
                canvas.place_image(output_image, x, y, args.additive)
            else:
                if args.anchor_mine is not None or args.random_mine:
                    z = create_mine_canvas(args.rows, args.cols, b, a, anchors)
                elif anchor_offsets is not None:
                    z = apply_anchor_offsets(anchors[r], anchor_offsets, a, b,
                                             args.anchor_offset_a,
                                             args.anchor_offset_b)
                else:
                    z = anchors[r]
                workq.append({"z": z, "x": x, "y": y})

    elif args.mask_layout or args.mask_radius:
        if args.mask_layout:
            rawim = imread(args.mask_layout)
            if len(rawim.shape) == 2:
                im_height, im_width = rawim.shape
                mask_layout = rawim
            else:
                im_height, im_width, _ = rawim.shape
                mask_layout = rawim[:, :, 0]
        else:
            im_height, im_width = args.mask_height, args.mask_width
            mask_layout = make_mask_layout(im_height, im_width,
                                           args.mask_radius)
        for xpos in range(im_width):
            for ypos in range(im_height):
                a = float(xpos) / (im_width - 1)
                if do_hex and ypos % 2 == 0:
                    a = a + 0.5 / (im_width - 1)
                x = args.mask_scale * canvas.xmax * a
                b = float(ypos) / (im_height - 1)
                y = args.mask_scale * canvas.ymax * b
                if mask_layout[ypos][xpos] <= 128:
                    continue
                if args.passthrough:
                    output_image = anchor_images[0]
                    canvas.place_image(output_image, x, y, args.additive)
                else:
                    if len(anchors) == 1 or anchor_offsets is not None:
                        z = apply_anchor_offsets(anchors[0], anchor_offsets, a,
                                                 b, args.anchor_offset_a,
                                                 args.anchor_offset_b)
                    else:
                        z = create_mine_canvas(args.rows, args.cols, b, a,
                                               anchors)
                    workq.append({"z": z, "x": x, "y": y})

    while len(workq) > 0:
        curq = workq[:args.batch_size]
        workq = workq[args.batch_size:]
        latents = [e["z"] for e in curq]
        images = dmodel.sample_at(np.array(latents))
        for i in range(len(curq)):
            canvas.place_image(images[i], curq[i]["x"], curq[i]["y"],
                               args.additive)

    canvas.save(args.save_path)
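
Since main takes its argument list explicitly, it can be driven directly from Python. A hypothetical invocation; the model path and filenames are placeholder assumptions, but every flag is defined in the parser above:

# model path, anchor image, and output name are illustrative placeholders
main([
    "--model", "mymodel.pkl",
    "--anchor-image", "anchors.png",
    "--mask-radius", "5.0",
    "--rows", "3", "--cols", "3",
    "--save-path", "out.png",
])
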
Example #12
0
def run_with_args(args, dmodel, cur_anchor_image, cur_save_path, cur_z_step):
    if args.seed is not None:
        np.random.seed(args.seed)
        random.seed(args.seed)

    anchor_images = None
    if args.anchors:
        _, get_anchor_images = lazy_init_fuel_dependencies()
        allowed = None
        prohibited = None
        include_targets = False
        if args.allowed:
            include_targets = True
            allowed = list(map(int, args.allowed.split(",")))
        if args.prohibited:
            include_targets = True
            prohibited = list(map(int, args.prohibited.split(",")))
        anchor_images = get_anchor_images(args.dataset,
                                          args.split,
                                          args.offset,
                                          args.stepsize,
                                          args.numanchors,
                                          allowed,
                                          prohibited,
                                          args.image_size,
                                          args.color_convert,
                                          include_targets=include_targets)

    if cur_anchor_image is not None:
        _, _, anchor_images = anchors_from_image(cur_anchor_image,
                                                 image_size=(args.image_size,
                                                             args.image_size))
        if args.offset > 0:
            anchor_images = anchor_images[args.offset:]
        # untested
        if args.numanchors is not None:
            anchor_images = anchor_images[:args.numanchors]

    if args.passthrough:
        print('Preparing image grid...')
        img = grid2img(anchor_images, args.rows, args.cols, not args.tight)
        img.save(cur_save_path)
        sys.exit(0)

    if dmodel is None:
        model_class_parts = args.model_class.split(".")
        model_class_name = model_class_parts[-1]
        model_module_name = ".".join(model_class_parts[:-1])
        print("Loading {} interface from {}".format(model_class_name,
                                                    model_module_name))
        ModelClass = getattr(importlib.import_module(model_module_name),
                             model_class_name)
        print("Loading model from {}".format(args.model))
        dmodel = ModelClass(filename=args.model)

    if anchor_images is not None:
        x_queue = anchor_images[:]
        anchors = None
        # print("========> ENCODING {} at a time".format(args.batch_size))
        while len(x_queue) > 0:
            cur_x = x_queue[:args.batch_size]
            x_queue = x_queue[args.batch_size:]
            encoded = dmodel.encode_images(cur_x)
            if anchors is None:
                anchors = encoded
            else:
                anchors = np.concatenate((anchors, encoded), axis=0)

        # anchors = dmodel.encode_images(anchor_images)
    elif args.anchor_vectors is not None:
        anchors = get_json_vectors(args.anchor_vectors)
    else:
        anchors = None

    if args.invert_anchors:
        anchors = -1 * anchors

    if args.encoder:
        if anchors is not None:
            output_vectors(anchors)
        else:
            stream_output_vectors(dmodel,
                                  args.dataset,
                                  args.split,
                                  batch_size=args.batch_size)
        sys.exit(0)

    global_offset = None
    if args.anchor_offset is not None:
        # compute anchors as offsets from existing anchor
        offsets = get_json_vectors(args.anchor_offset)
        if args.anchor_noise:
            anchors = anchors_noise_offsets(
                anchors, offsets, args.rows, args.cols, args.spacing,
                cur_z_step, args.anchor_offset_x, args.anchor_offset_y,
                args.anchor_offset_x_minscale, args.anchor_offset_y_minscale,
                args.anchor_offset_x_maxscale, args.anchor_offset_y_maxscale)
        else:
            anchors = anchors_from_offsets(
                anchors[0], offsets, args.anchor_offset_x,
                args.anchor_offset_y, args.anchor_offset_x_minscale,
                args.anchor_offset_y_minscale, args.anchor_offset_x_maxscale,
                args.anchor_offset_y_maxscale)

    if args.global_offset is not None:
        offsets = get_json_vectors(args.global_offset)
        if args.global_ramp:
            offsets = cur_z_step * offsets
        global_offset = get_global_offset(offsets, args.global_indices,
                                          args.global_scale)

    z_dim = dmodel.get_zdim()
    # I don't remember what partway/encircle do, so they do not handle the chain layout
    # this handles the case (at least) of mines with random anchors
    if (args.partway is not None) or args.encircle or (args.mine
                                                       and anchors is None):
        srows = ((args.rows // args.spacing) + 1)
        scols = ((args.cols // args.spacing) + 1)
        rand_anchors = generate_latent_grid(z_dim,
                                            rows=srows,
                                            cols=scols,
                                            fan=False,
                                            gradient=False,
                                            spherical=False,
                                            gaussian=False,
                                            anchors=None,
                                            anchor_images=None,
                                            mine=False,
                                            chain=False,
                                            spacing=args.spacing,
                                            analogy=False,
                                            rand_uniform=args.uniform)
        if args.partway is not None:
            l = len(rand_anchors)
            clipped_anchors = anchors[:l]
            anchors = (1.0 - args.partway
                       ) * rand_anchors + args.partway * clipped_anchors
        elif args.encircle:
            anchors = surround_anchors(srows, scols, anchors, rand_anchors)
        else:
            anchors = rand_anchors
    z = generate_latent_grid(z_dim, args.rows, args.cols, args.fan,
                             args.gradient, not args.linear, args.gaussian,
                             anchors, anchor_images, args.mine, args.chain,
                             args.spacing, args.analogy)
    if global_offset is not None:
        z = z + global_offset

    grid_from_latents(z, dmodel, args.rows, args.cols, anchor_images,
                      args.tight, args.shoulders, cur_save_path,
                      args.batch_size)
    return dmodel
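
run_with_args returns the loaded dmodel so a driver can reuse it across a sequence of frames instead of reloading the model every time. A minimal sketch of such a loop; frame_files and the output naming are illustrative assumptions:

dmodel = None
for i, frame in enumerate(frame_files):
    # cur_z_step sweeps 0..1 across the sequence (used by --global-ramp above)
    z_step = i / float(max(len(frame_files) - 1, 1))
    dmodel = run_with_args(args, dmodel, frame,
                           "frame_{:04d}.png".format(i), z_step)
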