Example #1
import json
import time

import perlin
from obswebsocket import obsws, requests

# host, port and password, plus the WINDOW_EASER and PROPERTIES helpers,
# are defined elsewhere in the original project and are not shown here.


def main():
    ws = obsws(host, port, password)
    ws.connect()

    PNFactoryX = perlin.PerlinNoiseFactory(1)
    PNFactoryY = perlin.PerlinNoiseFactory(1)

    ret = ws.call(requests.GetCurrentScene())
    print("current scene : ", ret.getName())
    sources = ret.getSources()
    print("current sources : ", sources)
    for source in sources:
        properties = ws.call(requests.GetSceneItemProperties(source["name"]))
        print("properties : ", properties.datain)
        s1 = PROPERTIES(properties.datain)  # note: only the last source's properties are kept

    w = WINDOW_EASER(1280, 720)
    w.setWindow("f1")
    w.setKeyFrame("f1", 10 * 2, [-0.05, -0.05, 1.1, 1.1])
    w.setKeyFrame("f1", 20 * 2, [-0.05, -0.05, 1.1, 1.1])
    w.setKeyFrame("f1", 40 * 2, [-0.2, -0.2, 1.4, 1.4])
    w.setKeyFrame("f1", 50 * 2, [-0.2, -0.2, 1.4, 1.4])
    w.setKeyFrame("f1", 70 * 2, [-0.05, -0.05, 1.1, 1.1])
    w.setKeyFrame("f1", 80 * 2, [-0.05, -0.05, 1.1, 1.1])
    w.setupKeyFrame()
    while True:
        for i in range(80 * 2):
            #print "=====================",i
            ret = w.update()
            #frame = w.draw()
            for source in sources:
                scale = s1.getScale(ret["f1"][3])
                data = requests.SetSceneItemTransform(source["name"], scale,
                                                      scale, 0).data()
                data["message-id"] = 100
                ws.ws.send(json.dumps(data))

                data = requests.SetSceneItemPosition(
                    source["name"],
                    ret["f1"][0] + PNFactoryX(i / 30.0) * 20 - 20,
                    ret["f1"][1] + PNFactoryY(i / 30.0) * 20 - 20).data()
                data["message-id"] = 100
                ws.ws.send(json.dumps(data))
                time.sleep(0.02)

            #cv2.imshow("window",frame)
            #cv2.waitKey(1)
        w.initKeyFrame()

    ws.disconnect()  # unreachable while the loop above runs forever
Example #2
    def generate_forest(self, threshold=0.25, tree_chance=0.2):
        noise = []
        for i in range(self.width):
            noise.append([])
            for j in range(self.height):
                noise[i].append(0)

        PNFactory_forest = perlin.PerlinNoiseFactory(2,
                                                     octaves=3,
                                                     tile=(),
                                                     unbias=False)

        for i in range(self.width):
            for j in range(self.height):
                noise[i][j] = PNFactory_forest(i / self.width, j / self.height)

        noise1D = []
        for i in range(self.width):
            for j in range(self.height):
                noise1D.append(noise[i][j])
        _min = np.min(noise1D)
        _max = np.max(noise1D)
        for i in range(self.width):
            for j in range(self.height):
                v = utils.normalise(noise[i][j], _min, _max)
                tile = self.grid[i][j]
                if (v < threshold
                        and tile.get_type() in life.Tree.get_good_tiles()):
                    if (tile.food is None and not tile.is_river
                            and np.random.random() < tree_chance):
                        tile.set_tree(
                            life.Tree(self.simu, tile, randomness=True))
Example #3
    def generate_elevation(self, start_tile=None):
        noise = []
        for i in range(self.width):
            noise.append([])
            for j in range(self.height):
                noise[i].append(0)

        PNFactory = perlin.PerlinNoiseFactory(2,
                                              octaves=4,
                                              tile=(),
                                              unbias=True)

        for i in range(self.width):
            for j in range(self.height):
                noise[i][j] = PNFactory(i / self.width, j / self.height)

        noise1D = []
        for i in range(self.width):
            for j in range(self.height):
                noise1D.append(noise[i][j])

        _min = np.min(noise1D)
        _max = np.max(noise1D)

        for i in range(self.width):
            for j in range(self.height):
                self.grid[i][j].elevation_raw = utils.normalise(
                    noise[i][j], _min, _max)
                # map the normalised [0, 1] value onto the [-3, 8] range
                self.grid[i][j].elevation = -3 + (
                    self.grid[i][j].elevation_raw * 11)
                self.grid[i][j].set_type_from_elevation()
                if self.grid[i][j] == "SHALLOW_WATER":
                    self.shallow_water_tiles.append(self.grid[i][j])
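Examples #2 and #3 both flatten the 2-D noise grid into a 1-D list just to find its minimum and maximum for normalisation. A minimal sketch of the same sample-then-normalise step done with numpy throughout (the array names are illustrative, not from the original code):

import numpy as np
import perlin

width, height = 64, 64
pnf = perlin.PerlinNoiseFactory(2, octaves=4, unbias=True)

# sample the noise directly into a 2-D array
noise = np.array([[pnf(i / width, j / height) for j in range(height)]
                  for i in range(width)])

# normalise to [0, 1] without building a separate 1-D copy
normalised = (noise - noise.min()) / (noise.max() - noise.min())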
Example #4
def generate_perlin(screen, random_int_from_fav_thing, random_int_from_name):
    # perlin noise for squiggles
    perlin.random.seed(
        random_int_from_fav_thing(2, 5) + random_int_from_name(3, 7))
    x_noise = perlin.PerlinNoiseFactory(1, octaves=5)
    y_noise = perlin.PerlinNoiseFactory(1, octaves=5)
    lines = [[]]
    drawer_vel = pygame.Vector2()
    drawer_pos = pygame.Vector2(200, 200)
    current_line = 0
    # out of bounds flag
    oob = False

    # generate the random squigglies
    for i in range(1000):
        drawer_vel.x = x_noise(i / 500)
        drawer_vel.y = y_noise(i / 500)
        if drawer_vel.x != 0 or drawer_vel.y != 0:
            drawer_vel.normalize_ip()
            drawer_vel *= 5
            drawer_pos += drawer_vel
        if drawer_pos.x < 0:
            drawer_pos.x = screen.get_width()
            oob = True
        elif drawer_pos.x > screen.get_width():
            drawer_pos.x = 0
            oob = True
        elif drawer_pos.y < 0:
            drawer_pos.y = screen.get_height()
            oob = True
        elif drawer_pos.y > screen.get_height():
            drawer_pos.y = 0
            oob = True
        if oob:
            lines.append(list())
            current_line += 1
            oob = False
        lines[current_line].append((int(drawer_pos.x), int(drawer_pos.y)))
    return lines
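generate_perlin only returns the point lists; a hypothetical caller would draw each squiggle as a separate polyline, for example (the surface setup here is assumed, not part of the original):

import random
import pygame

pygame.init()
screen = pygame.display.set_mode((800, 600))
screen.fill((255, 255, 255))
lines = generate_perlin(screen, random.randint, random.randint)
for line in lines:
    if len(line) > 1:  # pygame.draw.lines needs at least two points
        pygame.draw.lines(screen, (0, 0, 0), False, line)
pygame.display.flip()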
Example #5
    def generate_perlin_layers(param, nl):
        # pe, seed, n and m come from the enclosing scope of the original code
        fudge_factor = 1.5  # empirically observed
        lcor = param['corr_length']
        lstr = param['strength']
        lyrs = list()
        for il in range(nl):
            noisegen = pe.PerlinNoiseFactory(2, seed=seed)
            lyr = fudge_factor * lstr * np.asarray(
                [noisegen(iy, ix)
                 for ix in np.linspace(-lcor, lcor, n)
                 for iy in np.linspace(-lcor, lcor, m)]).reshape(m, n)
            lyrs.append(lyr)
        lyrs = np.asarray(lyrs)
        return lyrs
Example #6
    def __init__(self, width, height):
        self.objects = []
        self.width = width
        self.height = height
        r = random.randint(0, 999)  # random offset into the third noise dimension
        self.terrain = np.zeros((width, height))
        noise = perlin.PerlinNoiseFactory(3, 24)  # 3-D noise, 24 octaves
        print("starting terrain generation")
        for x in range(width):
            for y in range(height):
                self.terrain[x][y] = 0.5 + noise(x / width, y / height, r)
            p = x / width
            if (p * 100) % 10.0 < 0.3:  # rough progress print, roughly every 10%
                print(int(p * 100), "% complete")
        print("generated world")
        self.age = 0
        self.curr_year = 0
Example #7
def generate_data(tmin=15, tmax=22, imin=0, imax=24, d=1440):
    """
    Generates data points with values from [tmin, tmax] and in the
    time interval [imin, imax]

    Returns: A numpy array of length d
    """

    pnf = perlin.PerlinNoiseFactory(1, 5)  # 1-D noise, 5 octaves
    noise = np.linspace(imin, imax, d, endpoint=False) / imax
    for i, p in enumerate(noise):
        noise[i] = pnf(p)

    noise = np.abs((noise + 1) * tmax)
    #np.clip(noise, tmin, tmax, out=noise)
    return noise
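A quick way to eyeball the series generate_data produces (matplotlib is assumed here, it is not part of the original snippet):

import matplotlib.pyplot as plt

series = generate_data()
plt.plot(series)
plt.ylabel("value")
plt.show()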
Example #8
class perlinWalker:

    t = None
    screen = None
    perlinGenerator = perlin.PerlinNoiseFactory(1)  # 1-dimensional noise

    def setupBoard(self, width=500, height=500):
        self.t = turtle.Turtle()
        self.screen = turtle.Screen()
        self.screen.colormode(255)
        self.t.fillcolor((100, 1, 1))
        self.t.ht()
        self.screen.setup(width, height)
        self.screen.tracer(0)

    def map_range(self, value, start1, stop1, start2, stop2):
        return (value - start1) / (stop1 - start1) * (stop2 - start2) + start2

    def perlinGenerate(self, x):
        num = self.perlinGenerator(x)
        #print(num)
        return num

    def drawCircle(self):
        tX = random.randrange(800)
        tY = random.randrange(800)

        while True:
            self.t.clear()
            # PerlinNoiseFactory output spans roughly [-1, 1], so the mapped
            # position can fall outside [0, 800]
            xPosition = self.map_range(self.perlinGenerate(tX), 0, 1, 0, 800)
            yPosition = self.map_range(self.perlinGenerate(tY), 0, 1, 0, 800)
            print("xPosition: " + str(xPosition) + " yPosition: " +
                  str(yPosition))
            #xPosition = 300
            #yPosition = 400
            self.t.goto(xPosition, yPosition)
            self.t.dot(30)
            self.screen.update()
            tX += 0.0001
            tY += 0.0001
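A hypothetical driver for the class, using only the methods defined above:

walker = perlinWalker()
walker.setupBoard(500, 500)
walker.drawCircle()  # enters its own infinite draw loop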
Example #9
password = "******"

import perlin

if __name__ == "__main__":
    ws = obsws(host, port, password)
    ws.connect()

    ret = ws.call(requests.GetCurrentScene())
    print("current scene : ", ret.getName())
    sources = ret.getSources()
    print("current sources : ", sources)
    for source in sources:
        properties = ws.call(requests.GetSceneItemProperties(source["name"]))
        print("properties : ", properties.datain)

    PNFactoryX = perlin.PerlinNoiseFactory(1)
    PNFactoryY = perlin.PerlinNoiseFactory(1)

    for i in range(1200):
        for source in sources:
            data = requests.SetSceneItemPosition(
                source["name"],
                PNFactoryX(i / 30.0) * 20 - 20,
                PNFactoryY(i / 30.0) * 20 - 20).data()
            data["message-id"] = 100
            ws.ws.send(json.dumps(data))
            time.sleep(0.03)
    t2 = time.time()  # unused here; presumably for timing in the original

    ws.disconnect()
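Pushing the raw JSON through ws.ws.send with a fixed message-id skips the request/response round-trip that obs-websocket-py normally performs; the blocking equivalent, using the same requests objects the snippet already relies on, would look like this:

for i in range(1200):
    for source in sources:
        # call() sends the request and waits for OBS's response
        ws.call(requests.SetSceneItemPosition(
            source["name"],
            PNFactoryX(i / 30.0) * 20 - 20,
            PNFactoryY(i / 30.0) * 20 - 20))
        time.sleep(0.03)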
Example #10
    def __init__(self,
                 dimensions,
                 heightmod,
                 heightboost,
                 genBeach=True,
                 printh=False,
                 waterheight=0):
        ###GENERATE###
        pn = perlin.PerlinNoiseFactory(2, octaves=2)
        termap = {}
        terimg = Image.new("RGB", (dimensions, dimensions))
        terpx = terimg.load()
        for x in range(dimensions):
            termap[x] = {}
            for y in range(dimensions):
                value = pn(x / (dimensions / 2),
                           y / (dimensions / 2)) * heightmod + heightboost
                #print(value)
                i = ""
                p = (255, 255, 255)
                if value <= waterheight:
                    i = "r"  # water
                else:
                    i = "g"  # land
                    p = (0, 0, 0)
                termap[x][y] = i
                terpx[x, y] = p

        # pseudocode:
        # only blank and not blank
        # then replace edges with #
        # Idea: if more water than land, flip the water and land
        if genBeach:
            for x in range(dimensions - 1):
                for y in range(dimensions - 1):
                    if x > 0 and y > 0:
                        if termap[x][y] == "g" and termap[x + 1][y] == "r":
                            termap[x][y] = "#"
                            terpx[x, y] = (135, 135, 135)
                        elif termap[x][y] == "g" and termap[x - 1][y] == "r":
                            termap[x][y] = "#"
                            terpx[x, y] = (135, 135, 135)
                        elif termap[x][y] == "g" and termap[x][y + 1] == "r":
                            termap[x][y] = "#"
                            terpx[x, y] = (135, 135, 135)
                        elif termap[x][y] == "g" and termap[x][y - 1] == "r":
                            termap[x][y] = "#"
                            terpx[x, y] = (135, 135, 135)
                            #New apprach: scan around pixel, if next over is not equal to this ine, set to beach

        ###PRINT###
        if printh:
            for key, value in termap.items():
                out = ""
                for k, v in value.items():
                    out = out + str(v)
                print(out)

            terimg.show()

        self.mp = termap
        self.img = terimg
        self.d = dimensions
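The enclosing class name is not shown in the snippet; assuming it were called Terrain, a hypothetical instantiation would be:

terrain = Terrain(128, heightmod=10, heightboost=0, genBeach=True, printh=True)
terrain.img.show()  # the PIL image built in __init__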
Example #11
class bouncingBall:

    loc_Vector = None
    velocity_Vector = None
    screen = None
    accelerationVector = None
    perlinGenerator = perlin.PerlinNoiseFactory(1)
    tX = 0
    tY = 0

    def setupBoard(self, width=500, height=500):
        self.t = turtle.Turtle()
        self.screen = turtle.Screen()
        self.screen.colormode(255)
        self.t.fillcolor((100, 1, 1))
        self.t.ht()
        self.screen.setup(width, height)
        self.screen.tracer(0)
        self.t.speed(0)
        self.screen.setworldcoordinates(0, 0, width, height)
        self.loc_Vector = np.array(
            [float(random.randrange(width)),
             float(random.randrange(height))])
        self.velocity_Vector = np.array([0.55, 0.13])
        self.accelerationVector = np.array([0.00, 0.00])

    def bounce(self):
        if self.loc_Vector[0] > self.screen.screensize()[0] or self.loc_Vector[0] < 0:
            print("width   " + str(self.screen.screensize()[0]))
            self.velocity_Vector[0] *= -1
            self.accelerationVector[0] *= -1
            self.tX += 0.00001 * -1

        if self.loc_Vector[1] > self.screen.screensize()[1] or self.loc_Vector[1] < 0:
            print("height   " + str(self.screen.screensize()[1]))  # index 1 is the height
            self.velocity_Vector[1] *= -1
            self.accelerationVector[1] *= -1
            self.tY += 0.00001 * -1

    def drawCircle(self):
        self.t.clear()
        self.loc_Vector += self.velocity_Vector
        self.bounce()
        #print("xPosition: " + str(self.loc_Vector[0]) + " yPosition" + str(self.loc_Vector[1]))
        self.t.goto(self.loc_Vector[0], self.loc_Vector[1])
        self.t.dot(30)
        self.screen.update()

    def perlinGenerate(self, x):
        num = self.perlinGenerator(x)
        #print(num)
        return num

    def animate(self):
        while True:
            self.drawCircle()
            self.tX += 0.00001
            self.tY += 0.00001
            self.accelerationVector = np.array(
                [self.perlinGenerate(self.tX),
                 self.perlinGenerate(self.tY)])
            self.velocity_Vector += self.accelerationVector
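As with the walker above, a hypothetical driver using only the methods the class defines:

ball = bouncingBall()
ball.setupBoard(500, 500)
ball.animate()  # enters its own infinite loop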
Example #12
import perlin
from graphics import *
import emojis as emj

noise_factory = perlin.PerlinNoiseFactory(2, octaves=3)

noise = [[0 for x in range(100)] for y in range(100)]

map = ''  # note: shadows the built-in map(), as in the original

for i in range(100):
    for j in range(100):
        noise[i][j] = noise_factory(i / 10, j / 10)

        if noise[i][j] < 0.0:
            map += emj.wave_emoji
        elif noise[i][j] < 0.15:
            map += emj.beach_emoji
        elif noise[i][j] < 0.5:
            map += emj.forest_emoji
        elif noise[i][j] < 1:
            map += emj.mountain_emoji
        else:
            # PerlinNoiseFactory output stays within roughly [-1, 1],
            # so this fallback should rarely, if ever, be reached
            map += '1'
    map += '\n'

print(noise)
print(map)
print('\N{grinning face with smiling eyes}')
Example #13
    def run(self,
            source_paths,
            seed=0,
            exclude_paths=[],
            exclude_files=[],
            max_output_length=-1,
            big_chunk_interval=[1, 10, 5],
            big_chunk_length=[500, 5000],
            noise_rate_1=1,
            noise_rate_2=1,
            noise_rate_3=1,
            chunk_duration_range=[5, 300],
            chunk_overlap_range=[0.4, 0.95],
            chunk_skip_range=[300, 5000],
            remove_duplicates=True,
            split_interval=5 * 60,
            file_start_index=0,
            output_mels=False,
            correct_mod_dates=True,
            remove_silence=False,
            max_num_files=-1,
            time_scale=1):
        random.seed(seed)

        noise_offset = random.uniform(0, 10000000)

        noise = perlin.PerlinNoiseFactory(1, octaves=3, unbias=True)

        audio_files = []
        for path in source_paths:
            audio_files += glob.glob(path + "/**/*", recursive=True)

        if max_num_files > 0:
            audio_files = audio_files[:max_num_files]

        filtered_files = []
        for file in audio_files:
            if file.endswith(".mp3") or file.endswith(".wav"):
                valid = True
                for exclude_path in exclude_paths:
                    if exclude_path in file:
                        valid = False
                for exclude_file in exclude_files:
                    if file.endswith(exclude_file):
                        valid = False
                if valid:
                    filtered_files.append(file)
        audio_files = filtered_files

        # get corrected modified dates from sibling files with the same name;
        # the dict must exist even when correct_mod_dates is False, because
        # get_date_modified below looks it up
        corrected_modified_dates = dict()
        if correct_mod_dates:
            for file in audio_files:
                files_with_same_name = glob.glob(
                    os.path.splitext(file)[0] + ".*")

                if len(files_with_same_name) > 1:
                    files_with_same_name.sort(
                        key=lambda f: os.path.getmtime(f))
                    corrected_modified_dates[file] = os.path.getmtime(
                        files_with_same_name[0])

        # sort by date modified
        def get_date_modified(file):
            if file in corrected_modified_dates:
                return corrected_modified_dates[file]
            return os.path.getmtime(file)

        # correct the timestamps of this folder
        fs = list(
            filter(
                lambda f:
                "/Volumes/Shared/Projects/Music/Image-Line/Data/Projects/Oldies"
                in f, audio_files))
        for file in fs:
            date = datetime.fromtimestamp(get_date_modified(file))
            if date.day == 21 and date.month == 10 and date.year == 2008:
                date = date.replace(year=2004)
                corrected_modified_dates[file] = date.timestamp()

        audio_files.sort(key=get_date_modified)

        # remove duplicates
        if remove_duplicates:
            deduped_list = []
            filenames = []
            for file in audio_files:
                name = os.path.splitext(os.path.basename(file))[0]
                if name not in filenames:
                    filenames.append(name)
                    deduped_list.append(file)

            audio_files = deduped_list

        def file_interestingness(index):
            if index >= len(audio_files) or index == 0:
                return 1
            else:
                filename = get_filename_no_ext_no_numbers(audio_files[index])
                prev_filename = get_filename_no_ext_no_numbers(
                    audio_files[index - 1])
                return 1 - SequenceMatcher(None, prev_filename,
                                           filename).ratio()

        part_number = 1
        timestamp = int(time.time())

        output_metadata_files_path = "output/{0}-{1}.files.txt".format(
            seed, timestamp)
        output_metadata_times_path = "output/{0}-{1}.times.txt".format(
            seed, timestamp)
        metadata_files_file = open(output_metadata_files_path, "w")
        metadata_times_file = open(output_metadata_times_path, "w")

        num_files_scanned = 1
        fingerprint = AudioSegment.empty()

        total_duration = 0
        all_mels = None
        prev_filename = None

        low_interestingness_time = 0

        for i in range(file_start_index, len(audio_files)):
            file = audio_files[i]
            filename = os.path.basename(file)
            filename_no_extension = os.path.splitext(filename)[0]
            try:
                # open the audio files
                audio_file = AudioSegment.from_file(file)

                # remove silence
                if remove_silence:
                    non_silent_parts = silence.split_on_silence(
                        audio_file, 1000, silence_thresh=-40)
                    non_silent = AudioSegment.empty()
                    for part in non_silent_parts:
                        non_silent += part
                    audio_file = non_silent

                file_duration = audio_file.duration_seconds * 1000

                # skip super short files
                if file_duration < 10:
                    continue

                # calculate interestingness
                interestingness = file_interestingness(i)

                if interestingness == 0:
                    low_interestingness_time += 1

                    if low_interestingness_time > 10:
                        interestingness = 1
                        low_interestingness_time = 0

                    if file_interestingness(i + 1) > 0:
                        interestingness = 1

                # save metadata
                metadata_files_file.write("{0}~{1}\n".format(
                    get_date_modified(file), filename.replace("~", " ")))
                metadata_times_file.write("{0}\n".format(
                    round(total_duration, 2)))

                chunk_skip_rate = random.uniform(
                    chunk_skip_range[0], chunk_skip_range[1]) * time_scale

                chunk_start_time = 0

                file_mels = None

                num_big_chunks = 0
                if file_duration > 30000 and file_duration < 240000:
                    num_big_chunks = 1
                else:
                    num_big_chunks = math.floor(file_duration / 120000)
                if num_big_chunks > 5:
                    num_big_chunks = 5

                if interestingness == 0:
                    num_big_chunks = 0

                # overwrites the (otherwise unused) big_chunk_interval parameter
                big_chunk_interval = file_duration / (num_big_chunks + 1)

                next_bigchunk = big_chunk_interval + random.uniform(
                    -10000, 10000)

                while True:
                    noise_env_1 = noise(total_duration * noise_rate_1 +
                                        noise_offset) * 0.5 + 0.5
                    noise_env_2 = noise(total_duration * noise_rate_2 + 1000 +
                                        noise_offset) * 0.5 + 0.5
                    noise_env_3 = noise(total_duration * noise_rate_3 + 2000 +
                                        noise_offset) * 0.5 + 0.5  # unused below

                    chunk_duration = (noise_env_1 *
                                      (chunk_duration_range[1] -
                                       chunk_duration_range[0]) +
                                      chunk_duration_range[0]) * time_scale
                    chunk_overlap_ratio = (
                        noise_env_2 *
                        (chunk_overlap_range[1] - chunk_overlap_range[0]) +
                        chunk_overlap_range[0])

                    big_chunk = False

                    # big chunks are longer snippets taken from the track
                    if file_duration > 10000 and chunk_start_time > next_bigchunk and num_big_chunks > 0:
                        chunk_duration = random.uniform(
                            big_chunk_length[0],
                            big_chunk_length[1]) * time_scale
                        next_bigchunk += big_chunk_interval + random.uniform(
                            -10000, 10000)
                        num_big_chunks -= 1
                        big_chunk = True
                        print("Big chunk!")

                    # clamp to the file length (time_scale was already applied above)
                    chunk_duration = min(file_duration, chunk_duration)
                    chunk_duration_half = int(chunk_duration / 2)

                    chunk_end_time = chunk_start_time + chunk_duration

                    # if it's reached the end go to the next file
                    if chunk_end_time > file_duration:
                        break

                    chunk = audio_file[chunk_start_time:chunk_end_time]

                    chunk_start_time += chunk_skip_rate

                    # if the chunk is silent skip it
                    if chunk.dBFS == -float("infinity"):
                        print("Silent chunk skipped.")
                        break

                    # analyse
                    if output_mels:
                        # librosa expects floating-point samples
                        data = np.array(
                            chunk.get_array_of_samples()).astype(np.float32)
                        mels = librosa.feature.melspectrogram(y=data,
                                                              sr=44100,
                                                              n_mels=100,
                                                              power=1)
                        mels = np.mean(mels, axis=1)
                        if file_mels is None:  # truth-testing an array raises ValueError
                            file_mels = np.reshape(mels, (100, 1))
                        else:
                            # stack columns side by side; np.vstack would fail
                            # against the (100, 1) accumulator
                            file_mels = np.hstack(
                                [file_mels, np.reshape(mels, (100, 1))])

                    # fade the ends of the chunk
                    fade_time = chunk_duration_half
                    if big_chunk:
                        fade_time = min(chunk_duration_half, 400)
                        chunk_overlap_ratio = 0.2
                    chunk = chunk.fade_in(fade_time).fade_out(fade_time)

                    # attenuate chunk
                    chunk = chunk.apply_gain(-12 if not big_chunk else -6)

                    prev_len = fingerprint.duration_seconds
                    if fingerprint.duration_seconds == 0:
                        fingerprint = chunk
                    else:
                        # add the silence to the end of the fingerprint to make room for the new chunk
                        fingerprint = fingerprint + AudioSegment.silent(
                            duration=chunk_duration *
                            (1 - chunk_overlap_ratio))

                        curr_position = fingerprint.duration_seconds * 1000 - chunk_duration

                        # overlap the chunk with the fingerprint
                        fingerprint = fingerprint.overlay(
                            chunk, position=curr_position)
                    total_duration += fingerprint.duration_seconds - prev_len

                print(file, ":", i + 1, "of", len(audio_files))
                print("Length: {0:.3f} mins".format(total_duration / 60))

                # split and clear at regular intervals
                if fingerprint.duration_seconds > split_interval + 10:
                    source_paths = "output/{0}-{1}-P{2}.wav".format(
                        seed, timestamp, part_number)
                    part_file = fingerprint[:split_interval * 1000]
                    part_file.export(open(source_paths, "wb"), format="wav")

                    fingerprint = fingerprint[split_interval * 1000:]
                    part_number += 1

                # export and quit if max length reached
                if max_output_length > 0 and total_duration > max_output_length:
                    break

            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception as e:
                print("Failed:", file, e)

            if output_mels and file_mels is not None:
                if file_mels.shape[1] > 1:
                    file_mels = np.mean(file_mels, axis=1)

                if all_mels is None:
                    all_mels = file_mels
                else:
                    all_mels = np.hstack([all_mels, file_mels])

            num_files_scanned += 1

        source_paths = "output/{0}-{1}-P{2}.wav".format(
            seed, timestamp, part_number)
        fingerprint.export(open(source_paths, "wb"), format="wav")

        # join all the parts
        print("Joining parts...")
        self.join_parts(seed, timestamp)

        print("FINISHED")