예제 #1
0
def generate_full_image(color_string, seed):
    """Sample an arc-shaped 1-D progression from a Markov hierarchy and plot it.

    NOTE(review): despite the name, the returned image is all zeros and never
    written to; the real output is the full-screen matplotlib plot shown as a
    side effect. color_string is unused here (kept for the common generator
    signature shared by the other examples).
    """
    r.init_def_generator(seed)

    # Placeholder return value; never modified below.
    image = np.zeros((HEIGHT, WIDTH, 3))
    plots = []
    # loop_key is bound but never used; binding still advances the default
    # generator before setup_key is bound — removing it would change every
    # draw below. TODO confirm this ordering is intentional.
    loop_key = r.bind_generator()
    setup_key = r.bind_generator()

    # Second-order integration applied to each progression's raw samples.
    post_process = lambda x: data.integrate_series(
        x, n=2, mean_influences=[0, 0])

    # An "arc" alternates an upward ramp and a downward ramp of random length.
    pup = m.SimpleProgression(values=1,
                              self_length=[10, 40, 50],
                              post_process=post_process)
    pdown = m.SimpleProgression(values=-1,
                                self_length=[10, 40, 50],
                                post_process=post_process)
    arc = m.RandomMarkovModel(values=[pup, pdown], self_length=[2])

    p = m.RandomMarkovModel(
        # values=[p1, p2, arc],
        values=[arc],
        parent_rkey=r.bind_generator_from(setup_key))

    # for i in range(-30,30):
    for i in range(1):

        # 1000 raw samples, integrated once more on top of post_process.
        sample = m.sample_markov_hierarchy(p, 1000)
        sample = data.integrate_series(sample, 1, mean_influences=1)
        # sample = data.integrate_series(sample,1,mean_influences=0)
        # sample -= np.min(sample)
        # sample = data.integrate_series(sample,1)

        # slices = [ np.s_[:50],np.s_[50:100], np.s_[100:] ]
        # for slice in slices:
        #     sample[slice] -= np.mean(sample[slice])
        # sample = data.integrate_series(sample,1)
        # sample[:60+i] -= np.mean(sample[:60+i])
        # sample[60+i:] -= np.mean(sample[60+i:])
        # sample = data.integrate_series(sample,1)
        plots += [sample]

    # Show only the first (and, with range(1), only) series, full screen.
    plt.plot(plots[0])
    mng = plt.get_current_fig_manager()
    mng.full_screen_toggle()
    plt.show()
    # viz.animate_plots_y(plots)

    return image
예제 #2
0
def generate_neuron_images(color_string, seed):
    """Return one (HEIGHT, WIDTH) activation map per unit of EXPLORE_LAYER.

    Loads the network persisted under *seed*, feeds it the coordinate-grid
    input and slices the chosen layer's activations into per-neuron images.
    """
    r.init_def_generator(seed)
    rgen = r.bind_generator()

    model = file.load_nnet(rgen, prefix=seed, custom_objects=CUSTOM_OBJECTS)
    print(model.summary())

    # Coordinate grid spanning [-1, 1] along both axes.
    axis_y = np.linspace(-1, 1, num=HEIGHT)
    axis_x = np.linspace(-1, 1, num=WIDTH)
    yy, xx = np.meshgrid(axis_y, axis_x)

    nnet_input = generate_nnet_input(xx, yy, r.bind_generator_from(rgen))

    # Truncated model exposing the activations of the layer under study.
    probe_layer = model.get_layer(EXPLORE_LAYER)
    print(probe_layer.weights)
    probe_model = keras.models.Model(inputs=model.input,
                                     outputs=probe_layer.output)

    activations = probe_model.predict(nnet_input)
    print(activations.shape)

    # One image per neuron: column i reshaped back onto the pixel grid.
    return [
        activations[:, neuron].reshape([HEIGHT, WIDTH])
        for neuron in range(activations.shape[1])
    ]
예제 #3
0
def generate_full_image(color_string, seed):
    """Render an RGB image by running a TF network over mirrored grids.

    Builds the graph, feeds it pairs of forward/reversed coordinate ramps
    packed as 2x2 blocks, then maps the clipped output into [0, 1].
    """
    r.init_def_generator(seed)
    rgen = r.bind_generator()

    net_input = tf.placeholder('float32',
                               shape=[None, INPUT_SIZE, INPUT_SIZE])
    network = build_network(input=net_input, rgen=r.bind_generator_from(rgen))
    print(network.get_shape())

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        scale = 2
        # Ascending and descending ramps across both image axes.
        ramp_y_fwd = np.linspace(-1 * scale, 1 * scale, num=HEIGHT)
        ramp_y_rev = np.linspace(1 * scale, -1 * scale, num=HEIGHT)
        ramp_x_fwd = np.linspace(-1 * scale, 1 * scale, num=WIDTH)
        ramp_x_rev = np.linspace(1 * scale, -1 * scale, num=WIDTH)

        yy1, xx1 = np.meshgrid(ramp_y_fwd, ramp_x_fwd)
        yy2, xx2 = np.meshgrid(ramp_y_rev, ramp_x_rev)

        # One row per pixel: [y1, x1, y2, x2], regrouped as a 2x2 block.
        features = np.c_[yy1.flatten(), xx1.flatten(),
                         yy2.flatten(), xx2.flatten()]
        features = features.reshape([-1, 2, 2])

        output, = sess.run([network], {net_input: features})

    print(output.shape)
    image = output.reshape([HEIGHT, WIDTH, 3])
    print(np.max(image), np.min(image))
    # Map network output from [-1, 1] into [0, 1].
    image = np.clip(image, -1, 1) / 2 + 0.5
    return image
예제 #4
0
    def generate_full_image(color_string, seed):
        """Scatter random gradient circles onto a guarded canvas, then crop.

        The canvas is padded by ``guard_size`` on every side so circles
        placed near the border can spill over without indexing errors; the
        guard band is cropped away before normalization and export.
        """
        r.init_def_generator(seed)

        rkey = r.bind_generator()
        lp = len(PS)
        guard_size = 450
        image = np.zeros((HEIGHT + guard_size * 2, WIDTH + guard_size * 2))

        # num_circles = r.choice_from(rkey,[5,10,15,20,25])
        # num_circles = r.choice_from(rkey,[30,40,50,60,70,80,90,100])
        # num_circles = r.choice_from(rkey,[80,120,140])
        # num_circles = r.choice_from(rkey,[200,220,240])
        num_circles = 10
        # NOTE(review): ``ps`` is never read, but the draw advances the rkey
        # stream — removing it would change every subsequent sample. Confirm
        # it is kept deliberately for stream compatibility before cleaning up.
        ps = r.choice_from(rkey, PS, lp)
        for i in range(num_circles):

            # Fresh per-circle key forked from the main stream.
            loopkey = r.bind_generator_from(rkey)
            band_size = r.choice_from(loopkey, [10] + [15])
            circle = gradient_circle(band_size, PS[::-1] + PS[::2] + PS[1::2],
                                     r.bind_generator_from(loopkey))

            cheight, cwidth = circle.shape

            # +250 lets circles extend past the visible right/bottom edges;
            # the overflow lands in the guard band.
            xstart = r.choice_from(loopkey, np.arange(WIDTH + 250))
            ystart = r.choice_from(loopkey, np.arange(HEIGHT + 250))

            image[ystart:ystart + cheight, xstart:xstart + cwidth] += circle

        # Crop the guard band away, then normalize to [0, 1].
        image = image[guard_size:HEIGHT + guard_size,
                      guard_size:WIDTH + guard_size]
        image /= np.max(image)

        return data.upscale_nearest(data.prepare_image_for_export(image * 255),
                                    ny=UPSCALE_FACTOR_Y,
                                    nx=UPSCALE_FACTOR_X)
예제 #5
0
    def generate_full_image(color_string, seed):
        """Binary noise: each pixel is Bernoulli(PS[current_iteration]) * 255."""
        r.init_def_generator(seed)
        rkey = r.bind_generator()

        prob = PS[current_iteration]
        noise = r.binomial_from(rkey, 1, prob, size=(HEIGHT, WIDTH))
        image = noise * 255

        exported = data.prepare_image_for_export(image)
        return data.upscale_nearest(exported,
                                    ny=UPSCALE_FACTOR_Y,
                                    nx=UPSCALE_FACTOR_X)
예제 #6
0
    def generate_full_image(color_string, seed):
        """Vertical Bernoulli-noise bands, one probability per segment of PS."""
        r.init_def_generator(seed)
        rkey = r.bind_generator()
        image = np.zeros((HEIGHT, WIDTH))

        for segment in range(NUM_SEGMENTS):
            left = segment * SEGMENT_LENGTH
            right = left + SEGMENT_LENGTH
            # Segments are drawn in order from the single rkey stream.
            band = r.binomial_from(rkey,
                                   1,
                                   PS[segment],
                                   size=(HEIGHT, SEGMENT_LENGTH))
            image[:, left:right] = band

        return data.upscale_nearest(data.prepare_image_for_export(image * 255),
                                    ny=UPSCALE_FACTOR_Y,
                                    nx=UPSCALE_FACTOR_X)
예제 #7
0
    def generate_full_image(color_string, seed):
        """Two half-width Bernoulli fields with probabilities from PS."""
        r.init_def_generator(seed)
        rkey = r.bind_generator()

        image = np.zeros((HEIGHT, WIDTH))
        p_left, p_right = PS[current_iteration]
        half = WIDTH // 2

        # Draw order matters for the shared rkey stream: left half first.
        image[:, :half] = r.binomial_from(rkey, 1, p_left,
                                          size=(HEIGHT, half))
        image[:, half:] = r.binomial_from(rkey, 1, p_right,
                                          size=(HEIGHT, half))

        return data.upscale_nearest(data.prepare_image_for_export(image * 255),
                                    ny=UPSCALE_FACTOR_Y,
                                    nx=UPSCALE_FACTOR_X)
예제 #8
0
    def __init__(self,
                 node,
                 num_tiles=None,
                 length_limit=None,
                 parent_rkey=None,
                 pre_process=None,
                 post_process=None):
        """Store the wrapped node, normalized length specs and a random key.

        node -- the wrapped child node.
        num_tiles / length_limit -- scalar or list; normalized to lists
        (left as None when not given).
        parent_rkey -- optional parent generator to fork from; a fresh
        default generator is bound when absent.
        pre_process / post_process -- optional callables stored as-is.
        """
        self.node = node
        self.type = c.TYPE_PROC

        self.num_tiles = listify_if_not_none(num_tiles)
        self.length_limit = listify_if_not_none(length_limit)

        self.pre_process = pre_process
        self.post_process = post_process

        # Fork from the parent stream when given, otherwise start fresh.
        self.random_key = (r.bind_generator_from(parent_rkey)
                           if parent_rkey is not None
                           else r.bind_generator())
예제 #9
0
    def generate_full_image(color_string, seed):
        """Concentric Bernoulli-noise discs; inner discs overwrite outer ones."""
        r.init_def_generator(seed)
        rkey = r.bind_generator()

        num_bands = len(PS)
        side = num_bands * BAND_LEN
        image = np.zeros((side, side))

        for band, prob in enumerate(PS):
            # The disc shrinks each iteration, so later (inner) discs
            # overwrite the centre of earlier ones.
            diameter = (num_bands - band) * BAND_LEN
            disc = gen.circle((side, side), diameter // 2)
            noise = r.binomial_from(rkey,
                                    1,
                                    prob,
                                    size=(side, side))
            inside = disc == 1
            image[inside] = noise[inside]

        return data.upscale_nearest(data.prepare_image_for_export(image * 255),
                                    ny=UPSCALE_FACTOR_Y,
                                    nx=UPSCALE_FACTOR_X)
예제 #10
0
    def __init__(self,
                 values=None,
                 num_sinks=None,
                 sinks=None,
                 reduce_sinks=None,
                 parent_rkey=None,
                 **kwargs):
        """Markov model with a random preference matrix and absorbing sinks.

        values -- the model's states (scalar or list; listified).
        num_sinks -- draw this many sink states at random; mutually
        exclusive with ``sinks``.
        sinks -- explicit sink state indices.
        reduce_sinks -- if given, divide every sink column of the
        preference matrix by this factor, making sinks harder to enter.
        parent_rkey -- optional parent generator to fork from.
        Remaining kwargs are forwarded to the parent constructor.
        """
        if num_sinks is not None and sinks is not None:
            raise Exception(
                'You have to use either "num_sinks" or "sinks" to pass values. Both is not possible'
            )

        if parent_rkey is not None:
            self.random_key = r.bind_generator_from(parent_rkey)
        else:
            self.random_key = r.bind_generator()

        values = listify(values)
        l = len(values)
        # Random integer preferences in [0, 100) for every transition pair.
        preference_matrix = r.choice_from(self.random_key,
                                          np.arange(100),
                                          size=(l, l)).astype('float32')

        if num_sinks is not None:
            # BUG FIX: ``num_sinks`` was previously ignored — the draw used
            # the default sample size regardless of the requested count.
            # Pass it through as ``size`` so exactly num_sinks distinct
            # states are selected (``replace=False`` keeps them distinct).
            sinks = r.choice_from(self.random_key,
                                  np.arange(l),
                                  size=num_sinks,
                                  replace=False)
        sinks = listify(sinks)
        # A sink keeps all of its preference mass on itself (absorbing).
        for s in sinks:
            arr = np.zeros(l)
            arr[s] = 1
            preference_matrix[s] = arr

        if reduce_sinks is not None:
            # Scale down how attractive each sink is from other states.
            for s in sinks:
                preference_matrix[:,
                                  s] = preference_matrix[:, s] / reduce_sinks

        super().__init__(preference_matrix=preference_matrix,
                         values=values,
                         **kwargs)
예제 #11
0
    def generate_full_image(color_string, seed):
        """Concentric Poisson-noise discs, normalized to [0, 1] then exported."""
        r.init_def_generator(seed)
        rkey = r.bind_generator()

        side = NUM_BANDS * BAND_LEN
        image = np.zeros((side, side))

        for band in range(NUM_BANDS):
            # Inner discs are drawn last and overwrite outer ones; the
            # Poisson rate (band + 1.1) grows towards the centre.
            diameter = (NUM_BANDS - band) * BAND_LEN
            disc = gen.circle((side, side), diameter // 2)
            noise = r.poisson_from(rkey,
                                   band + 1.1,
                                   size=(side, side))
            inside = disc == 1
            image[inside] = noise[inside]

        image = image / np.max(image)

        return data.upscale_nearest(data.prepare_image_for_export(image * 255),
                                    ny=UPSCALE_FACTOR_Y,
                                    nx=UPSCALE_FACTOR_X)
예제 #12
0
def generate_image(color_string, seed):
    """Predict with the saved model and bundle prediction, labels and a diff.

    Returns [prediction, *labels, prediction - downsampled(last label)].
    """
    r.init_def_generator(seed)
    rgen = r.bind_generator()

    training_width = WIDTH * TRAINING_UPSAMPLE
    training_height = HEIGHT * TRAINING_UPSAMPLE
    labels = load_image_data(training_height, training_width)

    model = file.load_nnet(rgen, prefix=seed, custom_objects=CUSTOM_OBJECTS)
    print(model.summary())

    axis_y = np.linspace(-1, 1, num=HEIGHT)
    axis_x = np.linspace(-1, 1, num=WIDTH)
    yy, xx = np.meshgrid(axis_y, axis_x)

    nnet_input = generate_nnet_input(xx, yy, r.bind_generator_from(rgen))

    prediction = model.predict(nnet_input)

    # Prediction on the output grid, then each label on the training grid.
    images_processed = [
        img.reshape([HEIGHT, WIDTH]) for img in [prediction]
    ]
    images_processed += [
        lbl.reshape([HEIGHT * TRAINING_UPSAMPLE, WIDTH * TRAINING_UPSAMPLE])
        for lbl in labels
    ]

    # Prediction minus the last label, strided back down to the output grid.
    diff = (images_processed[0] -
            images_processed[-1][::TRAINING_UPSAMPLE, ::TRAINING_UPSAMPLE])
    images_processed.append(diff)
    return images_processed
예제 #13
0
    def generate_full_image(color_string, seed):
        """Scatter a random number of gradient circles, normalize, crop."""
        r.init_def_generator(seed)

        rkey = r.bind_generator()
        guard = 200
        canvas = np.zeros((HEIGHT + guard * 2, WIDTH + guard * 2))

        circle_count = r.choice_from(rkey, [5, 10, 15, 20, 25])
        for _ in range(circle_count):
            # Fresh per-circle key forked from the main stream.
            loopkey = r.bind_generator_from(rkey)
            band_size = r.choice_from(loopkey, np.arange(5, 10))
            circle = gradient_circle(band_size,
                                     r.bind_generator_from(loopkey))

            cheight, cwidth = circle.shape

            # +100 lets circles spill past the visible area into the guard.
            xstart = r.choice_from(loopkey, np.arange(WIDTH + 100))
            ystart = r.choice_from(loopkey, np.arange(HEIGHT + 100))
            canvas[ystart:ystart + cheight, xstart:xstart + cwidth] += circle

        # Normalize over the full padded canvas first, then crop the guard.
        canvas /= np.max(canvas)
        image = canvas[guard:HEIGHT + guard, guard:WIDTH + guard]

        return data.upscale_nearest(
            data.prepare_image_for_export(image * 255),
            ny=UPSCALE_FACTOR_Y,
            nx=UPSCALE_FACTOR_X)
예제 #14
0
def train_nnet(seed):
    """Train (or resume training of) the image network for *seed* and save it.

    Builds the coordinate-grid training input, wires up the keras callbacks
    and runs NUM_TRAINING_EPOCHS one-epoch fit calls. ``rgen_save`` is
    repeatedly reset so loading and saving consume the same generator state.
    """
    r.init_def_generator(seed)
    rgen = r.bind_generator()
    # Dedicated stream for model (de)serialization; reset before each reuse.
    rgen_save = r.bind_generator_from(rgen)

    training_width = WIDTH * TRAINING_UPSAMPLE
    training_height = HEIGHT * TRAINING_UPSAMPLE

    labels = load_image_data(training_height, training_width)

    # Training input: [-1, 1] coordinate grid at the upsampled resolution.
    ly1 = np.linspace(-1, 1, num=training_height)
    lx1 = np.linspace(-1, 1, num=training_width)
    yy1, xx1 = np.meshgrid(ly1, lx1)

    f = generate_nnet_input(xx1, yy1, r.bind_generator_from(rgen))

    input_size_base = f.shape[1]

    if CONTINUE_TRAINING is True:
        # Resume: load the previously saved network with the save stream,
        # then reset that stream so the final save replays the same state.
        model = file.load_nnet(rgen_save,
                               prefix=seed,
                               custom_objects=CUSTOM_OBJECTS)
        rgen_save = r.reset_generator(rgen_save)

    else:
        model = build_nnet(input_size_base=input_size_base,
                           rgen=r.bind_generator_from(rgen))

    data_generator = nn.ImageDataGenerator(x=f,
                                           y=labels,
                                           shuffle=True,
                                           batch_size=BATCH_SIZE,
                                           rgen=r.bind_generator_from(rgen))

    def scheduler(epoch):
        # Exponential learning-rate decay: lr * 0.996^epoch.
        current_learning_rate = LEARNING_RATE * (0.996**epoch)
        return current_learning_rate

    lr_callback = keras.callbacks.LearningRateScheduler(scheduler)
    tb_callback = keras.callbacks.TensorBoard(log_dir=C.PATH_FOLDER_LOGS,
                                              histogram_freq=1,
                                              write_grads=True,
                                              write_images=True)
    # NOTE(review): the three callbacks below are constructed but only
    # referenced from commented-out entries in model_callbacks.
    intermediary_output_callback = nn.SaveIntermediaryOutput(
        f=f, image_width=training_width, image_height=training_height)

    log_gradients_callback = nn.LogGradients(C.PATH_FOLDER_LOGS,
                                             data_generator)

    terminate_nan_callback = keras.callbacks.TerminateOnNaN()

    monitor_weights_callback = nn.MonitorWeights(C.PATH_FOLDER_LOGS, [
        'dense_5',
        'graphic_tanh_5',
        'sparse_connections_1',
        'graphic_tanh_4',
        'dense_4',
        'graphic_tanh_3',
    ], 0, data_generator)

    def mean_abs_metric(y_true, y_pred):
        # Mean absolute error after shifting both tensors by 0.25 and
        # doubling — currently unused (see commented metrics below).
        return keras.backend.mean(
            keras.backend.abs(2 * (y_true - 0.25) - 2 * (y_pred - 0.25)))

    def compile_fit(model):
        # Compile with MAE loss, then run NUM_TRAINING_EPOCHS single-epoch
        # fits so the generator's epoch hook can run between them.
        optimizer = keras.optimizers.Adam(lr=LEARNING_RATE)
        # optimizer = keras.optimizers.Adamax(lr=LEARNING_RATE)
        model.compile(
            optimizer=optimizer,
            # loss=keras.losses.mean_squared_error,
            loss=keras.losses.mean_absolute_error,
            # metrics = [mean_abs_metric],
            loss_weights=LOSS_WEIGHTS)

        model_callbacks = [
            lr_callback,
            tb_callback,
            # log_gradients_callback,
            # monitor_weights_callback,
            terminate_nan_callback
        ]
        if SAVE_INTERMEDIARY is True:
            model_callbacks.append(
                nn.SaveIntermediaryNNet(rgen_save, prefix=seed))

        print(model.summary())

        for i in range(NUM_TRAINING_EPOCHS):
            print("TRAINING EPOCH", i)
            # x,y = data_generator.generate_shuffled_data()
            model.fit(
                x=data_generator,
                callbacks=model_callbacks,
                shuffle=False,
                validation_data=(f, labels[0]),
                epochs=1,
                workers=0,
            )
            data_generator.on_epoch_end()

    compile_fit(model)
    # Reset the save stream before and after saving so any intermediary
    # saves during training and the final save use identical state.
    rgen_save = r.reset_generator(rgen_save)

    file.save_nnet(model, rgen_save, prefix=seed)
    rgen_save = r.reset_generator(rgen_save)
    def generate_full_image(color_string,seed):
        """Render an RGB image row by row from layered Markov-model signals.

        Per row: sample two hierarchical signals, integrate and concatenate
        them into an interpolation sequence, then map it through the color
        lines derived from *color_string*. The loop_key is reset every row
        so per-row randomness is reproducible, while setup_key draws vary
        across the whole image.
        """
        r.init_def_generator(seed)

        image = np.zeros((HEIGHT,WIDTH,3))
        plots = []
        loop_key = r.bind_generator()
        setup_key = r.bind_generator()


        # Two mirrored +/-0.1 oscillators used by the global model below.
        p1 = m.MarkovModel(
                values=[0.1,-0.1],
                preference_matrix=data.str2mat('1 5, 5 1'),
                self_length=SEGMENT_LENGTH,
                parent_rkey=r.bind_generator_from(setup_key)

            )

        p2 = m.MarkovModel(
            values=[-0.1, 0.1],
            preference_matrix=data.str2mat('1 5, 5 1'),
            self_length=SEGMENT_LENGTH,
            parent_rkey=r.bind_generator_from(setup_key)

        )

        # One full sine period sampled at num_coefs points, scaled to 0.1.
        num_coefs = 12
        vs = np.sin(np.linspace(0, 1, num_coefs) * np.pi * 2) * 0.1
        p3 = m.SimpleProgression(
            values=vs,
            start_probs=0,
            self_length=[num_coefs],
            parent_rkey=r.bind_generator_from(loop_key)
        )

        p = m.MarkovModel(
            values=[p1, p2, p3],
            start_probs=2,
            preference_matrix=data.str2mat(
                '0 1 2, 1 0 2, 1 1 4'),
            self_length=HEIGHT//SEGMENT_LENGTH+1,
            parent_rkey=r.bind_generator_from(setup_key)
        )

        # NOTE(review): sample_scale_1 and sample_2 are unused, but each
        # sampling call advances the model's generator state — removing
        # them would change sample_3 and everything after it.
        num_samples_1 = HEIGHT//2
        sample_scale_1 = m.sample_markov_hierarchy(p, num_samples_1)
        sample_2 = m.sample_markov_hierarchy(p, num_samples_1)
        sample_3 = m.sample_markov_hierarchy(p, num_samples_1)

        # interpolation_h_1 = integrate_and_normalize(sample_scale_1,2)
        # interpolation_h_2 = integrate_and_normalize(sample_2,2)
        interpolation_color = integrate_and_normalize(sample_3,2)

        color_repo = color.build_color_repository(color_string)
        meta = color.get_meta_from_palette(
            color_repo['First'],
            keys=[0,1,2,3],
            meta_cast_function=int)
        print(meta)
        color_lines = compute_color_lines(color_repo,interpolation_color)
        print(color_lines.shape)

        # plt.plot(interpolation_h_1)
        # plt.plot(interpolation_h_2)
        # plt.plot(interpolation_color)
        # plt.show()


        # Global per-image tuning constants, drawn once from config options.
        scale_1_freq = r.choice_from(setup_key,config.get('scale-1-freq-options',[0.025]))
        scale_2_freq = r.choice_from(setup_key,config.get('scale-2-freq-options',[0.02]))
        scale_1_scale = r.choice_from(setup_key,config.get('scale-1-scale-options',[0.02]))
        scale_2_scale = r.choice_from(setup_key,config.get('scale-2-scale-options',[0.02]))
        num_sin_coeffs = r.choice_from(setup_key,config.get('num-sin-coefficients-options',[18]))

        f1_scale = r.choice_from(setup_key,config.get('f1-scale-options',[0.2]))
        f2_scale = r.choice_from(setup_key,config.get('f2-scale-options',[0.4]))
        f3_scale = r.choice_from(setup_key,config.get('f3-scale-options',[0.15]))


        for current_row in range(HEIGHT):

            # Same per-row random stream each iteration; row-to-row variety
            # comes from the row-dependent scale/f parameters instead.
            loop_key = r.reset_key(loop_key)

            # self_length = SEGMENT_LENGTH+int(10*np.sin(np.pi*i*0.01))
            self_length = SEGMENT_LENGTH
            # scale_1 = 0.1 * (1 - interpolation_h_1[current_row]) + 0.15 * interpolation_h_1[current_row]
            scale_1 = 0.1 + scale_1_scale * np.sin(np.pi * current_row * scale_1_freq )
            scale_2 = 0.1 + scale_2_scale * np.sin(np.pi * current_row * scale_2_freq )
            p1 = m.MarkovModel(
                values=[scale_1, -scale_2],
                preference_matrix=data.str2mat('1 5, 5 1'),
                self_length=self_length,
                parent_rkey=r.bind_generator_from(loop_key)

            )

            p2 = m.MarkovModel(
                values=[-scale_1, scale_2],
                preference_matrix=data.str2mat('1 5, 5 1'),
                self_length=self_length,
                parent_rkey=r.bind_generator_from(loop_key)

            )

            # Flat segments and rare large jumps mixed into the row model.
            zeros = m.MarkovModel(
                values=[0,0],
                preference_matrix=data.str2mat('1 1, 1 1'),
                self_length=self_length*3,
                parent_rkey=r.bind_generator_from(loop_key)

            )

            jumps = m.MarkovModel(
                values=[-0.5, 0.5],
                preference_matrix=data.str2mat('1 1, 1 1'),
                self_length=1,
                parent_rkey=r.bind_generator_from(loop_key)

            )

            num_coefs = num_sin_coeffs
            vs = np.sin(np.linspace(0, 1, num_coefs) * np.pi * 2)*0.1
            p3 = m.SimpleProgression(
                values=vs,
                start_probs=0,
                self_length=[num_coefs],
                parent_rkey=r.bind_generator_from(loop_key)
            )

            p = m.MarkovModel(
                values=[p1, p2, p3, jumps, zeros],
                start_probs=2,
                preference_matrix=data.str2mat(
                    '0 1 2 2 1, 1 0 2 2 1, 1 1 4 2 2, 1 1 2 0 0, 1 1 1 1 2'),
                self_length=WIDTH//SEGMENT_LENGTH+1,
                parent_rkey=r.bind_generator_from(loop_key)
            )

            num_samples_1 = WIDTH//4
            num_samples_2 = WIDTH//3
            sample_x_up = m.sample_markov_hierarchy(p, num_samples_1)
            sample_x_down = m.sample_markov_hierarchy(p, num_samples_2)

            sample_x_up_int = data.integrate_series(sample_x_up,2,mean_influence=1)
            sample_x_down_int = data.integrate_series(sample_x_down,2,mean_influence=1)

            # Row-dependent blend factors that drift slowly down the image.
            f1 = 0.5 + f1_scale * np.sin(np.pi * current_row * 0.002 )
            f2 = -1 - f2_scale * np.sin(np.pi * current_row * 0.002 )
            f3 = 0.3 + f3_scale * np.sin(np.pi * current_row * 0.001 )

            sample_x_up_int = data.concat_signals(
                [sample_x_up_int]*4,
                [f1,f2,f1,f2])

            sample_x_down_int = data.concat_signals(
                [sample_x_down_int,sample_x_down_int,sample_x_down_int],
                [f3, f1, f3])
            # Prepend the first element so both signals line up in length.
            sample_x_down_int = np.r_[sample_x_down_int[0],sample_x_down_int]


            # roll_distance = 500 + int((interpolation_h_2[current_row]-0.5)*250)
            # roll_distance = 500 + int(current_row)
            # print(roll_distance)
            # sample_x_down_int = np.roll(sample_x_down_int, roll_distance)


            sample_x = sample_x_up_int + sample_x_down_int
            interpolation_sequence = sample_x[:HEIGHT]


            # Smooth, normalize to [0, 1], then stretch to [0, 3] for the
            # palette interpolation below.
            interpolation_sequence = gaussian_filter(interpolation_sequence,sigma=1)
            interpolation_sequence -= np.min(interpolation_sequence)
            interpolation_sequence /= np.max(interpolation_sequence)
            # interpolation_sequence = data.ease_inout_sin(interpolation_sequence)
            interpolation_sequence *= 3

            # interpolation_sequence *= 2
            # print(interpolation_sequence)

            gradient = data.interpolate(
                color_lines[:,current_row,:],
                interpolation_sequence,
                value_influences=meta
            )
            gradient = color.cam02_2_srgb(gradient)
            image[current_row] = gradient

            plots += [np.copy(interpolation_sequence)]

        # Upscale and clamp to the valid 8-bit range.
        image = data.upscale_nearest(image,ny=UPSCALE_FACTOR_Y,nx=UPSCALE_FACTOR_X)
        image[image<0] = 0
        image[image>255] = 255

        if SHOW_DEBUG_DATA is True:
            viz.animate_plots_y(plots)

        return image
예제 #16
0
    def __init__(self,
                 preference_matrix=None,
                 start_probs=None,
                 no_start=None,
                 values=None,
                 vs=None,
                 child_lengths=-1,
                 lenghts=None,
                 self_length=None,
                 update_step=None,
                 update_fun=None,
                 parent_rkey=None,
                 post_process=None):
        """Generator node of a Markov hierarchy.

        preference_matrix -- unnormalized transition preferences, converted
        into a transition matrix via compute_transition_matrix.
        start_probs -- None (uniform), a full per-state distribution, or
        state indices that should start with equal probability.
        no_start -- state indices whose start probability is forced to 0.
        values / vs -- the model's states (mutually exclusive aliases).
        child_lengths / lenghts / self_length -- length specs, listified.
        (``lenghts`` is a historical misspelling kept for compatibility.)
        update_step / update_fun -- optional transition-update hook.
        parent_rkey -- optional parent generator to fork from.
        post_process -- optional callable stored for later use.
        """

        ## checking all the received inputs
        if vs is not None and values is not None:
            raise Exception(
                'You have to use either "v" or "values" to pass values. Both is not possible'
            )

        if values is None and vs is not None:
            values = vs

        ## initializing everything
        # FIX: listify *before* measuring length, matching the sibling
        # constructor that listifies first — previously a scalar ``values``
        # crashed on len().
        self.values = listify(values)
        l = len(self.values)

        # child_lengths is one way one can deduce if is leaf node or not
        self.child_lengths = array_listify(child_lengths)
        self.lengths = array_listify_if_not_none(lenghts)
        self.self_length = array_listify_if_not_none(self_length)
        self.type = c.TYPE_GEN

        # computing the start probabilities
        if start_probs is None:
            start_prefs = np.ones(l)
        elif is_listy(start_probs) and len(start_probs) == l:
            start_prefs = start_probs
        else:
            # Treat start_probs as indices of allowed start states.
            start_prefs = np.zeros(l)
            start_probs = listify(start_probs)
            start_prefs[start_probs] = 1

        # no start is a way to specify that you do not want the model to start in a certain state
        if no_start is not None:
            no_start = listify(no_start)
            start_prefs[no_start] = 0
        self.start_probs = start_prefs / np.sum(start_prefs)

        self.leaf = markov_model_is_leaf(values)
        self.preference_matrix = np.array(preference_matrix)
        self.transition_matrix = compute_transition_matrix(
            self.preference_matrix)

        self.update_step = update_step
        self.update_fun = update_fun
        self.simulated_length = 0

        self.post_process = post_process

        #setup the random number generator if it wasn't already setup by someone else
        if not hasattr(self, 'random_key'):
            if parent_rkey is not None:
                self.random_key = r.bind_generator_from(parent_rkey)
            else:
                self.random_key = r.bind_generator()