Example #1
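# Builds a small fully connected image network: flattened input, nine
# 32-unit tanh layers, and a 3-unit tanh output. `tf` is TensorFlow 1.x;
# `nn` and `r` appear to be project-local helpers for layer construction
# and deterministic random-generator bookkeeping.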
def build_network(input, rgen):

    input = tf.reshape(input, shape=[-1, INPUT_SIZE * INPUT_SIZE])
    # print(input.get_shape().as_list())

    wdev = 0.5
    bdev = 0.5

    dense = nn.dense_layer(
        input,
        32,
        activation=tf.nn.tanh,
        wdev=wdev,
        bdev=bdev,
        rgen=r.bind_generator_from(rgen),
    )

    for i in range(8):
        dense = nn.dense_layer(
            dense,
            32,
            activation=tf.nn.tanh,
            wdev=wdev,
            bdev=bdev,
            rgen=r.bind_generator_from(rgen),
        )

    dense = nn.dense_layer(dense,
                           3,
                           wdev=wdev,
                           bdev=bdev,
                           rgen=r.bind_generator_from(rgen),
                           activation=tf.nn.tanh)

    return dense
Example #2
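# Scatters gradient circles over a canvas padded with a guard border (so
# circles may overflow the edges), then crops, normalizes, and upscales the
# result for export. `PS`, `gradient_circle`, and `data` come from the
# surrounding project.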
def generate_full_image(color_string, seed):
    r.init_def_generator(seed)

    rkey = r.bind_generator()
    lp = len(PS)
    guard_size = 450
    image = np.zeros((HEIGHT + guard_size * 2, WIDTH + guard_size * 2))

    # num_circles = r.choice_from(rkey,[5,10,15,20,25])
    # num_circles = r.choice_from(rkey,[30,40,50,60,70,80,90,100])
    # num_circles = r.choice_from(rkey,[80,120,140])
    # num_circles = r.choice_from(rkey,[200,220,240])
    num_circles = 10
    ps = r.choice_from(rkey, PS, lp)
    for i in range(num_circles):

        loopkey = r.bind_generator_from(rkey)
        band_size = r.choice_from(loopkey, [10, 15])
        circle = gradient_circle(band_size, PS[::-1] + PS[::2] + PS[1::2],
                                 r.bind_generator_from(loopkey))

        cheight, cwidth = circle.shape

        xstart = r.choice_from(loopkey, np.arange(WIDTH + 250))
        ystart = r.choice_from(loopkey, np.arange(HEIGHT + 250))

        image[ystart:ystart + cheight, xstart:xstart + cwidth] += circle

    image = image[guard_size:HEIGHT + guard_size,
                  guard_size:WIDTH + guard_size]
    image /= np.max(image)

    return data.upscale_nearest(data.prepare_image_for_export(image * 255),
                                ny=UPSCALE_FACTOR_Y,
                                nx=UPSCALE_FACTOR_X)
Example #3
def dense_layer(input, size, rgen, activation=None, wdev=0.01, bdev=0.01):

    w = create_variable(
        [input.shape.as_list()[1],size],name='w',dev=wdev,
        rgen=r.bind_generator_from(rgen))
    b = create_variable(
        [size],name='b',dev=bdev,
        rgen=r.bind_generator_from(rgen))

    layer = tf.add(tf.matmul(input, w), b)

    if activation is not None:
        layer = activation(layer)

    return layer
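`create_variable` is referenced above but not shown in these examples. A minimal sketch of what it plausibly does, assuming a normal initializer with standard deviation `dev` and a NumPy-Generator-style `rgen`; the project's actual helper may differ:

import tensorflow as tf

def create_variable(shape, name, dev, rgen):
    # Hypothetical reconstruction: draw the initial values from N(0, dev^2)
    # with the deterministic generator, then wrap them in a TF1 variable.
    init = rgen.normal(loc=0.0, scale=dev, size=shape).astype('float32')
    return tf.Variable(init, name=name)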
Example #4
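# Renders one grayscale image per neuron of a chosen intermediate layer:
# the model is evaluated over an (x, y) coordinate grid and each activation
# column is reshaped back to HEIGHT x WIDTH.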
def generate_neuron_images(color_string, seed):

    r.init_def_generator(seed)
    rgen = r.bind_generator()

    model = file.load_nnet(rgen, prefix=seed, custom_objects=CUSTOM_OBJECTS)
    model.summary()  # summary() prints the table itself and returns None

    ly = np.linspace(-1, 1, num=HEIGHT)
    lx = np.linspace(-1, 1, num=WIDTH)
    yy, xx = np.meshgrid(ly, lx)

    nnet_input = generate_nnet_input(xx, yy, r.bind_generator_from(rgen))

    # layers_of_interest = model.layers[1:-1]
    # print(layers_of_interest)

    layer = model.get_layer(EXPLORE_LAYER)
    print(layer.weights)
    model_inter = keras.models.Model(inputs=model.input, outputs=layer.output)

    activations = model_inter.predict(nnet_input)

    print(activations.shape)
    num_images = activations.shape[1]
    images = []
    for i in range(num_images):
        image = activations[:, i]
        image = image.reshape([HEIGHT, WIDTH])
        # print(np.max(image),np.min(image))
        # image = np.clip(image+0.5,0,1)
        images += [image]

    return images
Example #5
def generate_full_image(color_string, seed):
    r.init_def_generator(seed)
    rgen = r.bind_generator()

    input = tf.placeholder('float32', shape=[None, INPUT_SIZE, INPUT_SIZE])
    network = build_network(input=input, rgen=r.bind_generator_from(rgen))

    print(network.get_shape())

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        scale = 2
        ly1 = np.linspace(-1 * scale, 1 * scale, num=HEIGHT)
        ly2 = np.linspace(1 * scale, -1 * scale, num=HEIGHT)
        lx1 = np.linspace(-1 * scale, 1 * scale, num=WIDTH)
        lx2 = np.linspace(1 * scale, -1 * scale, num=WIDTH)

        yy1, xx1 = np.meshgrid(ly1, lx1)
        yy2, xx2 = np.meshgrid(ly2, lx2)
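        # Note: with np.meshgrid's default 'xy' indexing these arrays have
        # shape (WIDTH, HEIGHT); the reshape to [HEIGHT, WIDTH, 3] below is
        # only a row-major match when HEIGHT == WIDTH.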

        f = np.c_[yy1.flatten(), xx1.flatten(), yy2.flatten(), xx2.flatten()]
        f = f.reshape([-1, 2, 2])

        output, = sess.run([network], {input: f})

    print(output.shape)
    image = output.reshape([HEIGHT, WIDTH, 3])
    print(np.max(image), np.min(image))
    image = np.clip(image, -1, 1) / 2 + 0.5
    return image
Example #6
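# Debugging script: samples a hierarchy of up/down progressions from a
# Markov model (`m` is the project's markov module), integrates the series,
# and plots it full-screen; the returned image stays blank.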
def generate_full_image(color_string, seed):
    r.init_def_generator(seed)

    image = np.zeros((HEIGHT, WIDTH, 3))
    plots = []
    loop_key = r.bind_generator()
    setup_key = r.bind_generator()

    post_process = lambda x: data.integrate_series(
        x, n=2, mean_influences=[0, 0])

    pup = m.SimpleProgression(values=1,
                              self_length=[10, 40, 50],
                              post_process=post_process)
    pdown = m.SimpleProgression(values=-1,
                                self_length=[10, 40, 50],
                                post_process=post_process)
    arc = m.RandomMarkovModel(values=[pup, pdown], self_length=[2])

    p = m.RandomMarkovModel(
        # values=[p1, p2, arc],
        values=[arc],
        parent_rkey=r.bind_generator_from(setup_key))

    # for i in range(-30,30):
    for i in range(1):

        sample = m.sample_markov_hierarchy(p, 1000)
        sample = data.integrate_series(sample, 1, mean_influences=1)
        # sample = data.integrate_series(sample,1,mean_influences=0)
        # sample -= np.min(sample)
        # sample = data.integrate_series(sample,1)

        # slices = [ np.s_[:50],np.s_[50:100], np.s_[100:] ]
        # for slice in slices:
        #     sample[slice] -= np.mean(sample[slice])
        # sample = data.integrate_series(sample,1)
        # sample[:60+i] -= np.mean(sample[:60+i])
        # sample[60+i:] -= np.mean(sample[60+i:])
        # sample = data.integrate_series(sample,1)
        plots += [sample]

    plt.plot(plots[0])
    mng = plt.get_current_fig_manager()
    mng.full_screen_toggle()
    plt.show()
    # viz.animate_plots_y(plots)

    return image
Example #7
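# A variant of Example #2: randomized circle count, a smaller guard border,
# and normalization before cropping.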
def generate_full_image(color_string, seed):
    r.init_def_generator(seed)

    rkey = r.bind_generator()
    guard_size = 200
    image = np.zeros((HEIGHT + guard_size * 2, WIDTH + guard_size * 2))

    num_circles = r.choice_from(rkey, [5, 10, 15, 20, 25])
    # num_circles = r.choice_from(rkey,[30,40,50,60,70,80,90,100])
    # num_circles = r.choice_from(rkey,[400,500,600,700,800])
    for i in range(num_circles):

        loopkey = r.bind_generator_from(rkey)
        band_size = r.choice_from(loopkey, np.arange(5, 10))
        circle = gradient_circle(band_size,
                                 r.bind_generator_from(loopkey))

        cheight, cwidth = circle.shape

        xstart = r.choice_from(loopkey, np.arange(WIDTH + 100))
        ystart = r.choice_from(loopkey, np.arange(HEIGHT + 100))
        image[ystart:ystart + cheight, xstart:xstart + cwidth] += circle

    image /= np.max(image)
    image = image[guard_size:HEIGHT + guard_size,
                  guard_size:WIDTH + guard_size]

    return data.upscale_nearest(data.prepare_image_for_export(image * 255),
                                ny=UPSCALE_FACTOR_Y,
                                nx=UPSCALE_FACTOR_X)
Example #8
def simulate_markov_generator(node, length=None, rkey=None):

    breakout = False
    iteration = 0
    # if length is None, this means we are simulating the parent
    # if length is -1, this means no length for the children has been specified
    #   and in this case we use the self_length
    sim_length = 10 if length is None else length
    if sim_length == -1:
        sim_length = r.choice_from(rkey, node.self_length)
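    # With length=None (the root of the hierarchy), breakout is never set,
    # so chunks are yielded until the caller stops consuming; children run
    # exactly one iteration.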
    while not breakout:

        loop_key = r.bind_generator_from(rkey)

        simulation = r.call_and_bind_from(loop_key,
                                          simulate_markov_chain,
                                          node=node,
                                          length=sim_length)

        if node.lengths is not None:
            if len(node.lengths) == 1:
                repeats = [node.lengths[0] for _ in simulation]
            else:
                repeats = [node.lengths[s] for s in simulation]
            simulation = np.repeat(simulation, repeats)
        vals = [node.values[i] for i in simulation]
        if node.post_process is not None:
            vals = node.post_process(vals)

        if node.leaf is False:
            child_lens = r.choice_from(loop_key, node.child_lengths,
                                       sim_length)
            for child, child_length in zip(vals, child_lens):
                yield from simulate_markov_hierarchy(child, child_length)
        else:

            yield np.array(vals)

        iteration += 1
        if length is not None:
            breakout = True
Example #9
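    # Constructor of a processing node (c.TYPE_PROC) in the model hierarchy:
    # stores tiling/length limits plus pre/post-processing hooks, and derives
    # its random key from the parent when one is supplied.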
    def __init__(self,
                 node,
                 num_tiles=None,
                 length_limit=None,
                 parent_rkey=None,
                 pre_process=None,
                 post_process=None):
        self.node = node

        self.num_tiles = listify_if_not_none(num_tiles)
        self.length_limit = listify_if_not_none(length_limit)
        self.type = c.TYPE_PROC

        self.pre_process = pre_process
        self.post_process = post_process

        if parent_rkey is not None:
            self.random_key = r.bind_generator_from(parent_rkey)
        else:
            self.random_key = r.bind_generator()
Example #10
    def __init__(self,
                 values=None,
                 num_sinks=None,
                 sinks=None,
                 reduce_sinks=None,
                 parent_rkey=None,
                 **kwargs):

        if num_sinks is not None and sinks is not None:
            raise Exception(
                'You have to use either "num_sinks" or "sinks" to pass '
                'values, not both')

        if parent_rkey is not None:
            self.random_key = r.bind_generator_from(parent_rkey)
        else:
            self.random_key = r.bind_generator()

        values = listify(values)
        l = len(values)
        preference_matrix = r.choice_from(self.random_key,
                                          np.arange(100),
                                          size=(l, l)).astype('float32')

        if num_sinks is not None:
            # draw `num_sinks` distinct sink states
            sinks = r.choice_from(self.random_key,
                                  np.arange(l),
                                  size=num_sinks,
                                  replace=False)
        sinks = listify(sinks)
        for s in sinks:
            arr = np.zeros(l)
            arr[s] = 1
            preference_matrix[s] = arr

        if reduce_sinks is not None:
            for s in sinks:
                preference_matrix[:,
                                  s] = preference_matrix[:, s] / reduce_sinks

        super().__init__(preference_matrix=preference_matrix,
                         values=values,
                         **kwargs)
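Judging by Example #6, this appears to be the constructor of m.RandomMarkovModel: it draws a random preference matrix, then turns selected states into absorbing "sinks". A hypothetical usage sketch (all values illustrative):

# Assumes this class is m.RandomMarkovModel (cf. Example #6).
chain = m.RandomMarkovModel(
    values=[0.1, -0.1, 0.0],
    sinks=[2],        # state 2 becomes absorbing
    reduce_sinks=10,  # transitions into the sink become 10x less preferred
    parent_rkey=r.bind_generator())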
Example #11
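# Side-by-side comparison of a trained model and its training data: the
# prediction at screen resolution, the higher-resolution labels, and the
# pixelwise difference (labels subsampled by striding).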
def generate_image(color_string, seed):

    r.init_def_generator(seed)
    rgen = r.bind_generator()

    training_width = WIDTH * TRAINING_UPSAMPLE
    training_height = HEIGHT * TRAINING_UPSAMPLE
    labels = load_image_data(training_height, training_width)

    model = file.load_nnet(rgen, prefix=seed, custom_objects=CUSTOM_OBJECTS)
    model.summary()  # summary() prints the table itself and returns None

    ly = np.linspace(-1, 1, num=HEIGHT)
    lx = np.linspace(-1, 1, num=WIDTH)
    yy, xx = np.meshgrid(ly, lx)

    nnet_input = generate_nnet_input(xx, yy, r.bind_generator_from(rgen))

    images_processed = []

    images = model.predict(nnet_input)
    images = [images]

    for image in images:
        image = image.reshape([HEIGHT, WIDTH])
        images_processed += [image]
    for label in labels:
        label = label.reshape(
            [HEIGHT * TRAINING_UPSAMPLE, WIDTH * TRAINING_UPSAMPLE])
        images_processed += [label]

    images_processed += [
        images_processed[0] -
        images_processed[-1][::TRAINING_UPSAMPLE, ::TRAINING_UPSAMPLE]
    ]
    return images_processed
Example #12
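# Training loop for the coordinate-based image network. Generator keys are
# threaded through data loading, model construction, and checkpointing, and
# rgen_save is reset before each save/load, so a run appears reproducible
# from `seed` alone.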
def train_nnet(seed):

    r.init_def_generator(seed)
    rgen = r.bind_generator()
    rgen_save = r.bind_generator_from(rgen)

    training_width = WIDTH * TRAINING_UPSAMPLE
    training_height = HEIGHT * TRAINING_UPSAMPLE

    labels = load_image_data(training_height, training_width)

    ly1 = np.linspace(-1, 1, num=training_height)
    lx1 = np.linspace(-1, 1, num=training_width)
    yy1, xx1 = np.meshgrid(ly1, lx1)

    f = generate_nnet_input(xx1, yy1, r.bind_generator_from(rgen))

    input_size_base = f.shape[1]

    if CONTINUE_TRAINING is True:
        model = file.load_nnet(rgen_save,
                               prefix=seed,
                               custom_objects=CUSTOM_OBJECTS)
        rgen_save = r.reset_generator(rgen_save)

    else:
        model = build_nnet(input_size_base=input_size_base,
                           rgen=r.bind_generator_from(rgen))

    data_generator = nn.ImageDataGenerator(x=f,
                                           y=labels,
                                           shuffle=True,
                                           batch_size=BATCH_SIZE,
                                           rgen=r.bind_generator_from(rgen))

    def scheduler(epoch):
        current_learning_rate = LEARNING_RATE * (0.996**epoch)
        return current_learning_rate

    lr_callback = keras.callbacks.LearningRateScheduler(scheduler)
    tb_callback = keras.callbacks.TensorBoard(log_dir=C.PATH_FOLDER_LOGS,
                                              histogram_freq=1,
                                              write_grads=True,
                                              write_images=True)
    intermediary_output_callback = nn.SaveIntermediaryOutput(
        f=f, image_width=training_width, image_height=training_height)

    log_gradients_callback = nn.LogGradients(C.PATH_FOLDER_LOGS,
                                             data_generator)

    terminate_nan_callback = keras.callbacks.TerminateOnNaN()

    monitor_weights_callback = nn.MonitorWeights(C.PATH_FOLDER_LOGS, [
        'dense_5',
        'graphic_tanh_5',
        'sparse_connections_1',
        'graphic_tanh_4',
        'dense_4',
        'graphic_tanh_3',
    ], 0, data_generator)

    def mean_abs_metric(y_true, y_pred):
        return keras.backend.mean(
            keras.backend.abs(2 * (y_true - 0.25) - 2 * (y_pred - 0.25)))

    def compile_fit(model):
        optimizer = keras.optimizers.Adam(lr=LEARNING_RATE)
        # optimizer = keras.optimizers.Adamax(lr=LEARNING_RATE)
        model.compile(
            optimizer=optimizer,
            # loss=keras.losses.mean_squared_error,
            loss=keras.losses.mean_absolute_error,
            # metrics = [mean_abs_metric],
            loss_weights=LOSS_WEIGHTS)

        model_callbacks = [
            lr_callback,
            tb_callback,
            # log_gradients_callback,
            # monitor_weights_callback,
            terminate_nan_callback
        ]
        if SAVE_INTERMEDIARY is True:
            model_callbacks.append(
                nn.SaveIntermediaryNNet(rgen_save, prefix=seed))

        model.summary()  # summary() prints the table itself and returns None

        for i in range(NUM_TRAINING_EPOCHS):
            print("TRAINING EPOCH", i)
            # x,y = data_generator.generate_shuffled_data()
            model.fit(
                x=data_generator,
                callbacks=model_callbacks,
                shuffle=False,
                validation_data=(f, labels[0]),
                epochs=1,
                workers=0,
            )
            data_generator.on_epoch_end()

    compile_fit(model)
    rgen_save = r.reset_generator(rgen_save)

    file.save_nnet(model, rgen_save, prefix=seed)
    rgen_save = r.reset_generator(rgen_save)
Example #13
def generate_full_image(color_string, seed):
    r.init_def_generator(seed)

    image = np.zeros((HEIGHT, WIDTH, 3))
    plots = []
    loop_key = r.bind_generator()
    setup_key = r.bind_generator()

    p1 = m.MarkovModel(values=[0.1, -0.1],
                       preference_matrix=data.str2mat('1 5, 5 1'),
                       self_length=SEGMENT_LENGTH,
                       parent_rkey=r.bind_generator_from(setup_key))

    p2 = m.MarkovModel(values=[-0.1, 0.1],
                       preference_matrix=data.str2mat('1 5, 5 1'),
                       self_length=SEGMENT_LENGTH,
                       parent_rkey=r.bind_generator_from(setup_key))

    num_coefs = 12
    vs = np.sin(np.linspace(0, 1, num_coefs) * np.pi * 2) * 0.1
    p3 = m.SimpleProgression(values=vs,
                             start_probs=0,
                             self_length=[num_coefs],
                             parent_rkey=r.bind_generator_from(loop_key))

    p = m.MarkovModel(values=[p1, p2, p3],
                      start_probs=2,
                      preference_matrix=data.str2mat('0 1 2, 1 0 2, 1 1 4'),
                      self_length=HEIGHT // SEGMENT_LENGTH + 1,
                      parent_rkey=r.bind_generator_from(setup_key))

    num_samples_1 = HEIGHT // 2
    sample_scale_1 = m.sample_markov_hierarchy(p, num_samples_1)
    sample_2 = m.sample_markov_hierarchy(p, num_samples_1)
    sample_3 = m.sample_markov_hierarchy(p, num_samples_1)

    # interpolation_h_1 = integrate_and_normalize(sample_scale_1,2)
    # interpolation_h_2 = integrate_and_normalize(sample_2,2)
    interpolation_color = integrate_and_normalize(sample_3, 2)

    color_repo = color.build_color_repository(color_string)
    meta = color.get_meta_from_palette(color_repo['First'],
                                       keys=[0, 1, 2, 3],
                                       meta_cast_function=int)
    print(meta)
    color_lines = compute_color_lines(color_repo, interpolation_color)
    print(color_lines.shape)

    # plt.plot(interpolation_h_1)
    # plt.plot(interpolation_h_2)
    # plt.plot(interpolation_color)
    # plt.show()

    scale_1_freq = r.choice_from(setup_key,
                                 config.get('scale-1-freq-options', [0.025]))
    scale_2_freq = r.choice_from(setup_key,
                                 config.get('scale-2-freq-options', [0.02]))
    scale_1_scale = r.choice_from(setup_key,
                                  config.get('scale-1-scale-options', [0.02]))
    scale_2_scale = r.choice_from(setup_key,
                                  config.get('scale-2-scale-options', [0.02]))
    num_sin_coeffs = r.choice_from(
        setup_key, config.get('num-sin-coefficients-options', [18]))

    f1_scale = r.choice_from(setup_key, config.get('f1-scale-options', [0.2]))
    f2_scale = r.choice_from(setup_key, config.get('f2-scale-options', [0.4]))
    f3_scale = r.choice_from(setup_key, config.get('f3-scale-options', [0.15]))

    for current_row in range(HEIGHT):

        loop_key = r.reset_key(loop_key)

        # self_length = SEGMENT_LENGTH+int(10*np.sin(np.pi*i*0.01))
        self_length = SEGMENT_LENGTH
        # scale_1 = 0.1 * (1 - interpolation_h_1[current_row]) + 0.15 * interpolation_h_1[current_row]
        scale_1 = 0.1 + scale_1_scale * np.sin(
            np.pi * current_row * scale_1_freq)
        scale_2 = 0.1 + scale_2_scale * np.sin(
            np.pi * current_row * scale_2_freq)
        p1 = m.MarkovModel(values=[scale_1, -scale_2],
                           preference_matrix=data.str2mat('1 5, 5 1'),
                           self_length=self_length,
                           parent_rkey=r.bind_generator_from(loop_key))

        p2 = m.MarkovModel(values=[-scale_1, scale_2],
                           preference_matrix=data.str2mat('1 5, 5 1'),
                           self_length=self_length,
                           parent_rkey=r.bind_generator_from(loop_key))

        zeros = m.MarkovModel(values=[0, 0],
                              preference_matrix=data.str2mat('1 1, 1 1'),
                              self_length=self_length * 3,
                              parent_rkey=r.bind_generator_from(loop_key))

        jumps = m.MarkovModel(values=[-0.5, 0.5],
                              preference_matrix=data.str2mat('1 1, 1 1'),
                              self_length=1,
                              parent_rkey=r.bind_generator_from(loop_key))

        num_coefs = num_sin_coeffs
        vs = np.sin(np.linspace(0, 1, num_coefs) * np.pi * 2) * 0.1
        p3 = m.SimpleProgression(values=vs,
                                 start_probs=0,
                                 self_length=[num_coefs],
                                 parent_rkey=r.bind_generator_from(loop_key))

        p = m.MarkovModel(
            values=[p1, p2, p3, jumps, zeros],
            start_probs=2,
            preference_matrix=data.str2mat(
                '0 1 2 2 1, 1 0 2 2 1, 1 1 4 2 2, 1 1 2 0 0, 1 1 1 1 2'),
            self_length=WIDTH // SEGMENT_LENGTH + 1,
            parent_rkey=r.bind_generator_from(loop_key))

        num_samples_1 = WIDTH // 4
        num_samples_2 = WIDTH // 3
        sample_x_up = m.sample_markov_hierarchy(p, num_samples_1)
        sample_x_down = m.sample_markov_hierarchy(p, num_samples_2)

        sample_x_up_int = data.integrate_series(sample_x_up, 2,
                                                mean_influences=1)
        sample_x_down_int = data.integrate_series(sample_x_down, 2,
                                                  mean_influences=1)

        f1 = 0.5 + f1_scale * np.sin(np.pi * current_row * 0.002)
        f2 = -1 - f2_scale * np.sin(np.pi * current_row * 0.002)
        f3 = 0.3 + f3_scale * np.sin(np.pi * current_row * 0.001)

        sample_x_up_int = data.concat_signals([sample_x_up_int] * 4,
                                              [f1, f2, f1, f2])

        sample_x_down_int = data.concat_signals(
            [sample_x_down_int, sample_x_down_int, sample_x_down_int],
            [f3, f1, f3])
        sample_x_down_int = np.r_[sample_x_down_int[0], sample_x_down_int]

        # roll_distance = 500 + int((interpolation_h_2[current_row]-0.5)*250)
        # roll_distance = 500 + int(current_row)
        # print(roll_distance)
        # sample_x_down_int = np.roll(sample_x_down_int, roll_distance)

        sample_x = sample_x_up_int + sample_x_down_int
        interpolation_sequence = sample_x[:HEIGHT]

        interpolation_sequence = gaussian_filter(interpolation_sequence,
                                                 sigma=1)
        interpolation_sequence -= np.min(interpolation_sequence)
        interpolation_sequence /= np.max(interpolation_sequence)
        # interpolation_sequence = data.ease_inout_sin(interpolation_sequence)
        interpolation_sequence *= 3

        # interpolation_sequence *= 2
        # print(interpolation_sequence)

        gradient = data.interpolate(color_lines[:, current_row, :],
                                    interpolation_sequence,
                                    value_influences=meta)
        gradient = color.cam02_2_srgb(gradient)
        image[current_row] = gradient

        plots += [np.copy(interpolation_sequence)]

    image = data.upscale_nearest(image,
                                 ny=UPSCALE_FACTOR_Y,
                                 nx=UPSCALE_FACTOR_X)
    image[image < 0] = 0
    image[image > 255] = 255

    if SHOW_DEBUG_DATA is True:
        viz.animate_plots_y(plots)

    return image
Example #14
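    # Core MarkovModel constructor: normalizes start preferences into
    # probabilities, builds the transition matrix from a preference matrix,
    # and flags leaf nodes (markov_model_is_leaf presumably checks whether
    # `values` holds plain numbers rather than child models).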
    def __init__(self,
                 preference_matrix=None,
                 start_probs=None,
                 no_start=None,
                 values=None,
                 vs=None,
                 child_lengths=-1,
                 lengths=None,
                 self_length=None,
                 update_step=None,
                 update_fun=None,
                 parent_rkey=None,
                 post_process=None):

        ## checking all the received inputs
        if vs is not None and values is not None:
            raise Exception(
                'You have to use either "vs" or "values" to pass values, '
                'not both')

        if values is None and vs is not None:
            values = vs

        ## initializing everything
        # listify first so a scalar `values` doesn't break len()
        self.values = listify(values)
        l = len(self.values)

        # child_lengths is one way to deduce whether this is a leaf node
        self.child_lengths = array_listify(child_lengths)
        self.lengths = array_listify_if_not_none(lengths)
        self.self_length = array_listify_if_not_none(self_length)
        self.type = c.TYPE_GEN

        # computing the start probabilities
        if start_probs is None:
            start_prefs = np.ones(l)
        elif is_listy(start_probs) and len(start_probs) == l:
            start_prefs = start_probs
        else:
            start_prefs = np.zeros(l)
            start_probs = listify(start_probs)
            start_prefs[start_probs] = 1

        # no_start specifies states the model must not start in
        if no_start is not None:
            no_start = listify(no_start)
            start_prefs[no_start] = 0
        self.start_probs = start_prefs / np.sum(start_prefs)

        self.leaf = markov_model_is_leaf(values)
        self.preference_matrix = np.array(preference_matrix)
        self.transition_matrix = compute_transition_matrix(
            self.preference_matrix)

        self.update_step = update_step
        self.update_fun = update_fun
        self.simulated_length = 0

        self.post_process = post_process

        # set up the random number generator if it wasn't already set up
        # by a subclass (cf. Example #10)
        if not hasattr(self, 'random_key'):
            if parent_rkey is not None:
                self.random_key = r.bind_generator_from(parent_rkey)
            else:
                self.random_key = r.bind_generator()
Example #15
def generate_patch(height, width, color_dict, rkey):
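    # Builds one patch of the final image: Markov-sampled color sequences
    # define horizontal gradients whose segment boundaries drift from row to
    # row via cumulative random offsets.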

    patch = np.zeros((height, width, 3), dtype='float64')

    color_start_lengths = np.array(
        [int(i) for i in color.get_meta_from_palette(color_dict)])

    num_color_samples = width // np.min(color_start_lengths) + 20

    color_codes = color.get_keys_from_palette(color_dict)
    pattern = m.FuzzyProgression(values=color_codes,
                                 positive_shifts=3,
                                 negative_shifts=3,
                                 self_length=num_color_samples,
                                 parent_rkey=r.bind_generator_from(rkey))

    sample_raw_start = m.sample_markov_hierarchy(
        pattern, num_color_samples).astype('int32')
    sample_raw_down_start = m.sample_markov_hierarchy(
        pattern, num_color_samples).astype('int32')
    # print(sample_raw_start)
    sample_raw_end = m.sample_markov_hierarchy(
        pattern, num_color_samples).astype('int32')
    sample_raw_down_end = m.sample_markov_hierarchy(
        pattern, num_color_samples).astype('int32')
    sample_raw_backup = m.sample_markov_hierarchy(
        pattern, num_color_samples).astype('int32')
    # making the probability of same color used smaller
    replace_mask = sample_raw_start == sample_raw_end
    sample_raw_end[replace_mask] = sample_raw_backup[replace_mask]

    sample_start = color.replace_indices_with_colors(sample_raw_start,
                                                     color_dict)
    sample_end = color.replace_indices_with_colors(sample_raw_end, color_dict)

    sample_down_start = color.replace_indices_with_colors(
        sample_raw_down_start, color_dict)
    sample_down_end = color.replace_indices_with_colors(
        sample_raw_down_end, color_dict)

    switch_key = r.bind_generator_from(rkey)
    switch = np.array([
        r.choice_from(switch_key, [0, 1], replace=False, size=(2, ))
        for i in range(sample_start.shape[0])
    ])

    sample_start_t = np.where(switch[:, 0][:, None], sample_start, sample_end)
    sample_end_t = np.where(switch[:, 1][:, None], sample_start, sample_end)

    sample_start = sample_start_t
    sample_end = sample_end_t

    start_lengths = color.get_meta_for_each_sample(sample_raw_start,
                                                   color_dict)

    start_lengths = np.array([int(i) for i in start_lengths])
    start_lengths = np.cumsum(start_lengths)

    num_vertical_reps = 2
    num_vertical_samples = height // num_vertical_reps + 3
    model = m.MarkovModel(
        values=np.arange(0, 41, 10) - 20,
        preference_matrix=data.str2mat(
            '0 1 5 1 0, 1 2 5 1 0, 0 1 10 1 0, 0 1 5 2 1, 0 1 5 1 0'),
        self_length=num_vertical_samples,
        parent_rkey=r.bind_generator_from(rkey))

    offsets = np.stack([
        m.sample_markov_hierarchy(model, num_vertical_samples)
        for _ in range(num_color_samples)
    ],
                       axis=1)

    offsets = np.repeat(offsets,
                        repeats=r.choice_from(
                            rkey, [num_vertical_reps + i for i in range(1)],
                            size=(num_vertical_samples, )),
                        axis=0)

    offsets = np.cumsum(offsets, axis=0)
    offsets += start_lengths
    offsets = np.hstack([np.zeros((offsets.shape[0], 1)), offsets])

    i = 0
    offset_index = 0

    transition = np.linspace(0, 1, num_vertical_samples)
    sample_start_gradient = sample_start[:, :, None] * (
        1 - transition) + sample_down_start[:, :, None] * transition
    sample_end_gradient = sample_end[:, :, None] * (
        1 - transition) + sample_down_end[:, :, None] * transition

    multiples_choices = r.choice_from(rkey,
                                      config.get('multiples-choices',
                                                 [20, 30, 40, 50]),
                                      size=(6, ))
    # print('multiples-choices',multiples_choices)

    while i < height:
        loop_key = r.bind_generator_from(rkey)
        current_lengths = offsets[offset_index]
        acum_max = np.maximum.accumulate(current_lengths)
        mask = acum_max == current_lengths

        diff = np.diff(current_lengths[mask])

        samples_start_masked = sample_start[mask[1:]]
        samples_end_masked = sample_end[mask[1:]]
        #
        # samples_start_masked = sample_start_gradient[:,:,i//num_vertical_reps][mask[1:]]
        # samples_end_masked = sample_end_gradient[:,:,i//num_vertical_reps][mask[1:]]

        p_switch = config.get('gradient-switch-p', 0.5)

        switch = r.choice_from(loop_key, [0, 1],
                               size=samples_start_masked.shape[0],
                               p=[p_switch, 1 - p_switch])
        switch = np.stack((switch, 1 - switch), axis=1)

        sample_start_switched = np.where(switch[:, 0][:, None],
                                         samples_start_masked,
                                         samples_end_masked)
        sample_end_switched = np.where(switch[:, 1][:, None],
                                       samples_start_masked,
                                       samples_end_masked)

        multiples = r.choice_from(loop_key, multiples_choices)

        gradient = generate_gradient(sample_start_switched,
                                     sample_end_switched, diff)[:width]
        patch[i:i + multiples] = gradient[None, :]
        i += multiples
        offset_index += 1

    patch[patch < 0] = 0
    patch[patch > 255] = 255
    return patch
Example #16
def generate_image(gridx, gridy, color_repository, rkey):
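    # Tiles the canvas over an irregular grid. Cells may elongate to swallow
    # a right or lower neighbor (tracked in `occupied`), and each cell is
    # filled with a patch generated in the palette chosen for it.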
    keys = list(color_repository.keys())
    key_probabilities = [0.4, 0.4, 0.2]
    img = np.zeros((HEIGHT, WIDTH, 3), dtype='float64')

    startx = 0
    starty = 0

    y_iteration = 0
    gridyextended = np.append(gridy, HEIGHT)
    gridxextended = np.append(gridx, WIDTH)  # x grid extends to the right edge
    occupied = np.zeros((gridyextended.size, gridxextended.size), dtype='bool')
    for i, y in enumerate(gridyextended):
        endy = y
        y_iteration += 1
        rxkey = r.bind_generator_from(rkey)
        for j, x in enumerate(gridxextended):

            endx = x

            if occupied[i, j]:
                startx = endx
                continue

            p = 0.5
            elongatey = r.choice_from(rxkey, [True, False], p=[p, 1 - p])
            elongatex = r.choice_from(rxkey, [True, False], p=[0, 1])
            if i >= gridyextended.size - 1:
                elongatey = False
            if j >= gridxextended.size - 1:
                elongatex = False

            startyactual = starty
            endyactual = endy
            startxactual = startx
            endxactual = endx

            height = endy - starty
            width = endx - startx

            if elongatey:
                add_height = gridyextended[i + 1] - gridyextended[i]
                height = endy + add_height - starty
                endyactual += add_height

            if elongatex:
                add_width = gridxextended[j + 1] - gridxextended[j]
                width = endx + add_width - startx
                endxactual += add_width

            if elongatex and elongatey:
                occupied[i:i + 2, j:j + 2] = True
            elif elongatex and not elongatey:
                occupied[i, j:j + 2] = True
            elif elongatey and not elongatex:
                occupied[i:i + 2, j] = True
            else:
                occupied[i, j] = True

            key = r.choice_from(rxkey, keys, p=key_probabilities)

            patch = r.call_and_bind_from(rxkey, generate_patch, height, width,
                                         color_repository[key])
            img[startyactual:endyactual, startxactual:endxactual] = patch

            startx = endx

        startx = 0
        starty = endy

    final = data.upscale_nearest(img, ny=UPSCALE_FACTOR, nx=UPSCALE_FACTOR)

    return final.astype('uint8')