Example no. 1
    async def _bot_info(self, ctx):
        shard_ping = int(self.disco.shards[ctx.guild.shard_id].ws.latency *
                         1000)
        uptime = get_length(
            (datetime.utcnow() - self.disco.started_at).total_seconds() * 1000,
            True)

        em = discord.Embed(
            colour=self.disco.color[0],
            title=l(ctx, 'commands.botinfo.statistics'),
            description=l(ctx, 'commands.botinfo.links', {
                "support": "https://discord.gg/qN5886E",
                "invite": "https://discobot.site",
                "donate": "https://patreon.com/discobot",
                "vote": "https://botsparadiscord.xyz/bots/disco",
                "github": "https://github.com/Naegin/Disco"
            })
        ).set_author(
            name=ctx.me.name,
            icon_url=ctx.me.avatar_url
        ).set_footer(
            text=l(ctx, 'commons.createdBy', {"creator": "Naegin#0049"}),
            icon_url='https://cdn.naeg.in/i/naegin-avatar.gif'
        ).set_thumbnail(
            url=ctx.me.avatar_url
        ).add_field(
            name=l(ctx, 'commands.botinfo.generalInfoTitle'),
            value=l(ctx, 'commands.botinfo.generalInfoDescLeft', {
                "shard": ctx.guild.shard_id + 1,
                "shards": len(self.disco.shards),
                "ping": shard_ping,
                "servers": len(self.disco.guilds),
                "members": len(set(self.disco.get_all_members())),
                "players": len(self.disco.wavelink.players),
                "nodes": len(self.disco.wavelink.nodes)
            })
        ).add_field(
            name='\u200b',
            value=l(ctx, 'commands.botinfo.generalInfoDescRight', {
                "uptime": uptime,
                "messages": f'{self.disco.read_messages:,}',
                "commands": f'{self.disco.invoked_commands:,}',
                "played": f'{self.disco.played_tracks:,}'
            })
        )

        for identifier, node in self.disco.wavelink.nodes.items():
            stats = node.stats

            em.add_field(
                name=f'**LAVALINK NODE {identifier}**',
                value=l(
                    ctx, 'commands.botinfo.nodeInfo', {
                        "region": node.region.title().replace("_", " "),
                        "uptime": get_length(stats.uptime, True),
                        "stats": stats,
                        "memory": naturalsize(stats.memory_used)
                    }))

        await ctx.send(content=ctx.author.mention, embed=em)
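
Many of these bot snippets pass millisecond durations to get_length, with a second boolean selecting a long "uptime" style. A minimal sketch of such a formatter, assuming that is all the flag does (the real helper's exact output format is not shown in these examples):

def get_length(milliseconds, verbose=False):
    # Hypothetical sketch of the duration formatter used above; the real
    # implementation may format differently.
    seconds = int(milliseconds // 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    if verbose:
        parts = [f'{value}{unit}' for value, unit in
                 ((days, 'd'), (hours, 'h'), (minutes, 'm'), (seconds, 's')) if value]
        return ' '.join(parts) or '0s'
    if days or hours:
        return f'{days * 24 + hours}:{minutes:02d}:{seconds:02d}'
    return f'{minutes:02d}:{seconds:02d}'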
Example no. 2
    def _generate_multiplier_map_tex(self):
        multiplier_map = pyglet.image.create(self.sample_size, self.sample_size)
        data = ""
        
        # Useful fractions
        half_sample_size = self.sample_size / 2.0
        quarter_sample_size = self.sample_size / 4.0
        sample_center = (half_sample_size, half_sample_size)
        
        # Iterate over pixels
        for y in xrange(self.sample_size):
            for x in xrange(self.sample_size):
                pixel = (x, y)
                
                # Which quadrant is the pixel in?
                quadrant = self.get_quadrant(pixel)
                if not quadrant:
                    data += chr(0) * 3 + chr(255)  # RGBA black
                    continue
                
                # Find the shape compensation value. First, find the distance
                # to the quadrant centre.
                center = self.get_quadrant_center(quadrant)
                distance = utils.get_length(pixel, center)
                # Get the angle between the camera direction and the texel
                angle = math.atan(distance / quarter_sample_size)
                compensation_value = math.cos(angle)

                # Find the Lambert cosine multiplier.
                distance = utils.get_length(pixel, sample_center)
                distance /= half_sample_size
                distance *= math.pi / 2.0
                lambert_value = math.cos(distance)
                if lambert_value < 0.0:
                    lambert_value = 0.0                
                
                # Get bytes for the image
                multiplier = compensation_value * lambert_value
                int_value = int(round(multiplier * 255.0))
                pixel_data = chr(int_value) * 3 + chr(255)  # RGBA
                data += pixel_data
        
        # Get the texture
        multiplier_map.set_data(multiplier_map.format,
                                multiplier_map.pitch, data)
        multiplier_map.save("/tmp/mult.png")
        
        # TODO: Remove
        multiplier_map = pyglet.image.load("textures/mult.png")
        
        return multiplier_map.get_texture()
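
In this snippet (and in the wall code of the later room examples) utils.get_length measures the 2D distance between two points. A minimal sketch of that flavour of the helper, assuming both arguments are (x, y) pairs:

import math

def get_length(point_a, point_b):
    # Hypothetical sketch: Euclidean distance between two 2D points.
    return math.hypot(point_b[0] - point_a[0], point_b[1] - point_a[1])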
Example no. 3
    async def _queue(self, ctx, page: int = 1):
        player = self.get_player(ctx.guild.id)
        if not player.queue:
            return await ctx.send(
                f'{self.lite.emoji["false"]} **{ctx.author.name}**, a fila de '
                'reprodução está vazia.')

        length = get_length(sum(track.length for track in player.queue), True)

        pages = ceil(player.size / 12)
        if not 0 < page <= pages:
            page = 1

        skip = (page - 1) * 12
        current = player.current

        txt = f'Tocando Agora: [**{current}**]({current.uri}) `[{get_length(current.length)}]` - {current.requester.mention}\n\n'
        for i in range(skip, skip + 12):
            try:
                track = player.queue[i]
            except IndexError:
                continue

            txt += f'**`»`** `{i+1}` [**{track}**]({track.uri}) `[{get_length(track.length)}]` - {track.requester.mention}\n'

        em = discord.Embed(
            colour=self.lite.color[1],
            title='Fila de Reprodução',
            description=txt
        ).set_author(
            name=ctx.guild.name,
            icon_url=ctx.guild.icon_url
        ).set_thumbnail(
            url=ctx.me.avatar_url
        ).set_footer(
            text=f'Duração: {length} | Página {page}/{pages}'
        )

        await ctx.send(content=ctx.author.mention, embed=em)
Example no. 4
 def generate_wall_lightmap(self):
     """Create a lightmap for the wall, and tex coords for it.
     
     """
     height = 32.0
     
     # Add up the wall lengths to find the width
     total_wall_length = 0.0
     for wall in self.walls:
         total_wall_length += utils.get_length(wall[0], wall[1])
     room_height = self.ceiling_height - self.floor_height
     width = (total_wall_length / room_height) * height
     
     # Round width to next power of two
     pot = 1
     while pot <= 2048:
         if width > pot:
             pot *= 2
         else:
             width = float(pot)
             break
     
     if width > 2048.0:
         height *= 2048.0 / width
         width = 2048.0
     
     # Create lightmap
     self.wall_lightmap = Lightmap(width, height)
     
     # Info to generate radiosity
     self.lightmaps.append((self.wall_lightmap,
                            self.get_position_for_wall_lightmap_texel))
Example no. 5
    async def on_track_event(self, event):
        player = event.player

        if isinstance(event, TrackStart):
            self.disco.played_tracks += 1
            track = event.track

            if not player.repeat:
                await player.send(player.t('events.trackStart', {"track": track,
                                                                 "emoji": self.disco.emoji["download"],
                                                                 "length": 'LIVESTREAM' if track.is_stream else
                                                                 get_length(track.length)}))

            if self.redis and (vc := self.disco.get_channel(int(player.channel_id))) and vc.members:
                payload = {
                    "type": "track_start",
                    "track": {
                        "title": track.title,
                        "author": track.author,
                        "length": track.length,
                        "stream": track.is_stream
                    },
                    "users": [member.id for member in vc.members if not member.bot]
                }

                await self.redis.publish_json('activity', payload)
Example no. 6
    async def _shards(self, ctx):
        table = PrettyTable([
            'Shard ID', 'Latency', 'Uptime', 'Guilds', 'Members', 'Last Update'
        ])

        for shard in self.disco._shards.all():
            now = datetime.utcnow()
            latency = f'{int(shard.latency * 1000)}ms' if shard.latency else 'Unknown'
            guilds = f'{shard.guilds:,}' if shard.guilds else 'Unknown'
            members = f'{shard.members:,}' if shard.members else 'Unknown'
            uptime = get_length((now - shard.launched_at).total_seconds() * 1000, True) \
                if shard.launched_at else 'Unknown'
            last_update = get_length((now - shard.last_update).total_seconds() * 1000, True) \
                if shard.last_update else 'Unknown'

            table.add_row(
                [shard.id, latency, uptime, guilds, members, last_update])

        await ctx.send(f'```{table.get_string()}```')
Example no. 7
def set_length():
    connection = psycopg2.connect(SQLALCHEMY_DATABASE_URI)
    cursor = connection.cursor()
    for root, dirs, files in os.walk(CUSTOM_STATIC_PATH):
        file_keys = set()
        for filename in files:
            youtube_id = filename.split('.')[0]
            if youtube_id not in file_keys:
                length = get_length(os.path.join(root, filename))
                cursor.execute("UPDATE videos SET length = {length} WHERE youtube_id = '{youtube_id}' ".format(
                    length=length, youtube_id=youtube_id))
                connection.commit()
                file_keys.add(youtube_id)
Example no. 8
    async def _queue(self, ctx, page: int = 1):
        player = self.get_player(ctx.guild.id)
        if not player.queue:
            return await ctx.send(l(ctx, 'errors.emptyQueue', {"author": ctx.author.name,
                "emoji": self.disco.emoji["false"]}))

        length = get_length(sum(track.length for track in player.queue), True)

        per_page = 10
        pages = ceil(player.size / per_page)
        if not 0 < page <= pages:
            page = 1

        skip = (page - 1) * per_page
        current = player.current
        tracks = player.queue[skip:skip+per_page]

        txt = l(ctx, 'commands.queue.currentTrack', {"track": current,
            "length": get_length(current.length)})

        for i, t in enumerate(tracks, skip+1):
            txt += f'**`»`** `{i}` [**{t}**]({t.uri}) `[{get_length(t.length)}]` - {t.requester.mention}\n'

        em = discord.Embed(
            colour=self.disco.color[1],
            title=l(ctx, 'commands.queue.name'),
            description=txt
        ).set_author(
            name=ctx.guild.name,
            icon_url=ctx.guild.icon_url
        ).set_thumbnail(
            url=ctx.me.avatar_url
        ).set_footer(
            text=l(ctx, 'commands.queue.details', {"length": length,
                "page": page, "pages": pages})
        )

        await ctx.send(content=ctx.author.mention, embed=em)
Example no. 9
    async def _now_playing(self, ctx):
        player = self.get_player(ctx.guild.id)
        if not player.current:
            return await ctx.send(l(ctx, 'errors.notPlaying', {"author": ctx.author.name,
                "emoji": self.disco.emoji["false"]}))

        track = player.current
        em = discord.Embed(
            colour=self.disco.color[0],
            description=l(ctx, 'commands.nowplaying.text', {"track": track,
                "length": get_length(track.length)})
        )

        await ctx.send(embed=em, delete_after=15)
Example no. 10
def chat():
    req_data = request.get_json()
    message = utils.normalize_string((req_data['message']))
    try:
        indices = utils.get_batched_indices(message)
    except KeyError:
        reply = "I did not understand your language!!, check the spelling perhaps"
    else:
        numpy_array = utils.list2numpy(indices)
        length = utils.get_length(numpy_array)
        reply = redis_db.process(numpy_array, length)

    resp = jsonify(reply=reply)
    return resp
Example no. 11
    def testlength(self):
        for i in range(len(self.tests)):
            osr2mp4 = self.tests[i]

            expectlength = 0
            starttime = osr2mp4.starttimne
            endtime = osr2mp4.endtime
            if osr2mp4.endtime == -1:
                expectlength += 5
                endtime = osr2mp4.replay_event[osr2mp4.end_index][
                    Replays.TIMES] / osr2mp4.settings.timeframe
            expectlength += endtime - starttime

            outputlength = get_length(osr2mp4.settings.output)
            print(outputlength, expectlength)
            self.assertLessEqual(abs(expectlength - outputlength), 2)
Example no. 12
def process_batch_list(batch_list, config, video_path):
    global UTILIZATION
    if video_path:
        if batch_list:
            print("Video path specified, ignoring batch list")
        batch_list = [video_path]
    else:
        with Path(batch_list).open("r") as f:
            batch_list = f.read().strip().replace("'",
                                                  "").replace('"',
                                                              "").split("\n")
    print(f"Batch {batch_list}")
    UTILIZATION = load_utilization()
    print(f"Utilization for {MONTH}: {UTILIZATION[MONTH]}")
    #UTILIZATION[MONTH] = 480*60

    # load list
    for item in batch_list:
        # Skip blank lines and commented-out entries
        if not item.strip() or item.strip().startswith("#"):
            print(f"skipping {item}")
            continue
        if UTILIZATION[MONTH] > MAX_UTILIZATION:
            warnings.warn("MAX utilization reached")
            break
        print(f"Checking if {item} exists...")
        item = search_folder_for_video(item)
        if item:
            print("Working on ", Path(item).name)
            config, _config_parser = utils.process_config(opts.config,
                                                          video_path=item)
            total_length = utils.get_length(
                item,
                Path(config.ffmpeg_path).parent / "ffprobe")
            total_length = round(total_length + 7.49, 15)
            if total_length > 1200:  # should be longer than 20 minutes
                success = process_item(config, _config_parser)
                if success:
                    UTILIZATION[MONTH] += total_length
            else:
                print(item, "less than 1000 seconds, skipping", total_length)
        else:
            print(item, "not found")

    print("New Utilization", MONTH, UTILIZATION[MONTH])
    save_utilization(UTILIZATION)
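
Here (and in the Extractor example further down) get_length returns a video file's duration in seconds, in this case with an explicit path to an ffprobe binary. A minimal sketch under that assumption; how the real utils.get_length invokes and parses ffprobe is not shown in the snippet:

import subprocess

def get_length(video_path, ffprobe='ffprobe'):
    # Hypothetical sketch: ask ffprobe for the container duration in seconds.
    out = subprocess.check_output([
        str(ffprobe), '-v', 'error',
        '-show_entries', 'format=duration',
        '-of', 'default=noprint_wrappers=1:nokey=1',
        str(video_path),
    ])
    return float(out.decode().strip())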
Example no. 13
    async def on_track_event(self, event):
        player = event.player
        if player.repeat:
            track = player.repeat
        elif player.size:
            track = player.queue.pop(0)
        else:
            player.current = None
            return await player.send(l(player, 'events.queueEnd', {
                "emoji": self.disco.emoji["alert"]}))

        await player.play(track)
        self.disco.played_tracks += 1

        if not player.repeat:
            await player.send(l(player, 'events.trackStart', {"track": track,
                "emoji": self.disco.emoji["download"], "length": get_length(track.length)}))
Example no. 14
 def forward(self, seq, mask):
     # seq: (N, C, L)
     # mask: (N, L)
     max_len = seq.size()[2]
     length = get_length(mask)
     seq = torch.transpose(seq, 1, 2)  # to (N, L, C)
     packed_seq = nn.utils.rnn.pack_padded_sequence(seq,
                                                    length,
                                                    batch_first=True,
                                                    enforce_sorted=False)
     outputs, _ = self.bid_rnn(packed_seq)
     outputs = nn.utils.rnn.pad_packed_sequence(outputs,
                                                batch_first=True,
                                                total_length=max_len)[0]
     outputs = outputs.view(-1, max_len, 2,
                            self.hidden_size).sum(2)  # (N, L, C)
     outputs = self.out_dropout(outputs)  # output dropout
     return torch.transpose(outputs, 1, 2)  # back to: (N, C, L)
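
In this encoder get_length derives per-sequence lengths from the (N, L) padding mask so the batch can be packed. A minimal sketch under the assumption that the mask holds 1 for real tokens and 0 for padding (pack_padded_sequence with enforce_sorted=False expects the lengths on the CPU):

import torch

def get_length(mask):
    # Hypothetical sketch: count the non-padded positions in each row of an (N, L) mask.
    return mask.long().sum(dim=1).cpu()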
Example no. 15
def gen_complete_test_set(tfc_reader):
    """
    It generates Complete Test Set for gate having all
    positive controls and no constant line, garbage
    For NCT and GT library
    """
    rev_non_tfc(tfc_reader)

    length = get_length()

    tp = list(itertools.product([0, 1], repeat=length))
    tfc_gates = get_tfc_gates()

    output_writter = open("cts.txt", 'w')
    output_writter.write(str("||"))

    for i in range(0, len(tp)):
        pattern = tp[i]
        Flag = 0

        olist = initalize_output_dict(pattern)

        for gate in tfc_gates:
            gate = gate.split()

            gate_len = len(gate)
            # Check if all values are 1 or not
            if (all(x == 1 for x in olist.values())):
                if gate_len >= 2:
                    Flag = 1

            olist = gate_operation(gate, olist)

        if Flag == 1:
            write_output(output_writter, olist)

    output_writter.seek(0, 0)
    output_writter.close()

    return
Example no. 16
def gen_level_wise_output(tfc_reader):
    """
    It generates leve_wise_output for gate having all
    positive controls and no constant line, garbage
    For NCT and GT library
    """
    gen_non_tfc(tfc_reader)

    length = get_length()

    # tp -- TestPattern
    tp = list(itertools.product([0, 1], repeat=length))
    tfc_gates = get_tfc_gates()

    output_writter = open("lwo.txt", 'w')
    for i in range(0, len(tp)):
        pattern = tp[i]

        output_writter.write(str("||"))
        olist = initalize_output_dict(pattern)

        # Printing Input Pattern
        write_output(output_writter, olist)

        for gate in tfc_gates:
            gate = gate.split()

            olist = gate_operation(gate, olist)

            # Printing Level-Wise-Output
            write_output(output_writter, olist)

        output_writter.write(str("\n"))
    output_writter.seek(0, 0)
    output_writter.close()

    return
Example no. 17
    def __init__(self,
                 input_video_path,
                 output_dir,
                 deinterlace='drop',
                 step=5,
                 duration=1000,
                 start=None,
                 end=None,
                 prefix=None):
        '''
        The Extractor class contains all the necessary information to extract
        images from a video using ffmpeg. By default, it extracts a frame
        every 'step' (5) seconds from the video.

        :param input_video_path: full path to the video
        :param output_dir: directory to store transcoded frames to
        :param deinterlace: deinterlacing method to apply (default 'drop')
        :param step: extract a frame every 'step' seconds
        :param duration: duration in milliseconds to extract every step seconds
        :param start: starting time to begin extracting images
        :param end: ending time to finish extracting images
        :param prefix: prefix to prepend to extracted frame filenames

        :Example:
        Extract a frame every 5 seconds using drop deinterlacing:
        Extractor('/Volumes/data/D008_03HD.mov', '/Volumes/data/out/D008_03HD', 'drop', 5)
        '''
        self.input_video_path = input_video_path
        _, fname = os.path.split(input_video_path)
        self.key = fname.split('.')[0]
        self.output_dir = '{0}/{1}/imgs'.format(output_dir, self.key)
        utils.ensure_dir(self.output_dir)
        self.seconds_counter = 0
        self.duration = duration
        self.step = step
        self.start = start
        self.end = end
        self.prefix = prefix
        self.video_length = utils.get_length(input_video_path)
        self.fps = 29.97  #utils.get_framerate(input_video_path)
        self.single_frame = False
        if duration is None:
            self.duration = 1e3 / self.fps + 1  # default to a single frame
            self.single_frame = True
        if prefix is None:
            self.prefix = "f"
        # To time methods, uncomment line below
        #self.times = []
        self.deinterlace = deinterlace
        self.start_iso_time = datetime.now()
        self.dive = 'Unknown'
        p = fname.split('_')
        for f in p:
            if utils.validate_iso8601(f):
                self.start_iso_time = datetime.strptime(f, '%Y%m%dT%H%M%SZ')
                break
        if 'D' in fname:
            p = re.compile(r'(?P<dive>\w+)_(?P<timecode>\w+)?')
            match = re.search(pattern=p, string=fname)
            if (match):
                if match.group('timecode') and utils.validate_iso8601(
                        match.group('timecode')):
                    self.start_iso_time = datetime.strptime(
                        match.group('timecode'), '%Y%m%dT%H%M%SZ')
                else:
                    self.start_iso_time = datetime.now()
                if match.group('dive'):
                    self.dive = match.group('dive')

        print('Dive {} timecode start {} length {} seconds'.format(
            self.dive, self.start_iso_time, self.video_length))
Example no. 18
    async def _play(self, ctx, *, query):
        if not web_url(query):
            query = f'ytsearch:{query}'

        results = await self.disco.wavelink.get_tracks(query)
        if not results:
            return await ctx.send(l(ctx, 'commands.play.noResults', {"author": ctx.author.name,
                "emoji": self.disco.emoji["false"]}))

        player = ctx.player

        if hasattr(results, 'tracks'):
            total_length = 0
            tracks = results.tracks[:1500-player.size]
            for track in tracks:
                total_length += track.length
                player.queue.append(DiscoTrack(ctx.author, track.id, track.info))

            name = results.data['playlistInfo']['name']
            await player.send(l(ctx, 'commands.play.playlistAdded', {"playlist": name,
                "emoji": self.disco.emoji["plus"], "length": get_length(total_length),
                "added": len(tracks)}))
        else:
            if len(results) == 1:
                track = results[0]

                player.queue.append(DiscoTrack(ctx.author, track.id, track.info))

                await player.send(l(ctx, 'commands.play.trackAdded', {"track": track,
                    "emoji": self.disco.emoji["plus"], "length": get_length(track.length)}))
            else:
                self.waiting.add(ctx.author.id)

                tracks = results[:10]
                options = ''
                for i, track in enumerate(tracks, 1):
                    options += f'**`»`** `{i}` [**{track}**]({track.uri}) `[{get_length(track.length)}]`\n'

                em = discord.Embed(
                    colour=self.disco.color[0],
                    title=l(ctx, 'commands.play.chooseOne'),
                    description=options
                ).set_author(
                    name=l(ctx, 'commands.play.searchResults'),
                    icon_url=ctx.guild.icon_url
                ).set_thumbnail(
                    url=ctx.me.avatar_url
                ).set_footer(
                    text=l(ctx, 'commands.play.typeToCancel')
                )

                q = await player.send(content=ctx.author.mention, embed=em)
                cancel = l(ctx, 'commons.exit').lower()

                try:
                    a = await self.disco.wait_for('message', timeout=120,
                        check=lambda c: c.channel.id == q.channel.id and c.author.id == ctx.author.id \
                            and c.content and (c.content.isdigit() and 0 < int(c.content) <= len(tracks)
                            or c.content.lower() == cancel))
                except Timeout:
                    a = None

                if not a or a.content.lower() == cancel:
                    if ctx.author.id in self.waiting:
                        self.waiting.remove(ctx.author.id)

                    return await q.delete()

                track = tracks[int(a.content) - 1]

                player.queue.append(DiscoTrack(ctx.author, track.id, track.info))
                if ctx.author.id in self.waiting: self.waiting.remove(ctx.author.id)
                await q.edit(content=l(ctx, 'commands.play.trackAdded', {"track": track,
                    "emoji": self.disco.emoji["plus"], "length": get_length(track.length)}),
                    embed=None)

        if not player.current:
            await player.play(ctx.player.queue.pop(0))
            await player.send(l(ctx, 'events.trackStart', {"author": ctx.author.name,
                "emoji": self.disco.emoji["download"], "track": player.current,
                "length": get_length(player.current.length)}))
            
            self.disco.played_tracks += 1
Example no. 19
    def generate_triangulated_data(self):
        """Generate triangles to draw floor, ceiling and walls.
        
        Must be called after shared walls have been set.
        
        """        
        # Get 2D triangles for the floor and ceiling
        self.triangles = utils.triangulate(self.vertices)
        # Put the vertex attributes in an interleaved array
        floor_data = []
        ceiling_data = []
        for triangle in self.triangles:
            for point in triangle:
                # Floor data
                # 3D vertex coords
                floor_data.append(point[0])
                floor_data.append(point[1])
                floor_data.append(self.floor_height)
                # 2D texture coords
                # Take the longest dimension as 1m
                floor_texture_ratio = (float(self.floor_texture.width) /
                                       float(self.floor_texture.height))
                if floor_texture_ratio < 1.0:
                    floor_texture_ratio = 1.0 / floor_texture_ratio
                # Apply rotation
                tex_x = (point[0] * math.cos(self.floor_texture_angle) -
                         point[1] * math.sin(self.floor_texture_angle))
                tex_y = (point[0] * math.sin(self.floor_texture_angle) +
                         point[1] * math.cos(self.floor_texture_angle))
                # Apply scale
                tex_x /= self.floor_texture_scale
                tex_y /= self.floor_texture_scale
                # Correct ratio and add to list
                floor_data.append(tex_x * floor_texture_ratio)
                floor_data.append(tex_y)
                # Lightmap coords
                min_x, max_x, min_y, max_y = self.bounding_box
                lm_x = (point[0] - min_x) / (max_x - min_x)
                lm_y = (point[1] - min_y) / (max_y - min_y)
                floor_data.extend([lm_x, lm_y])

            # Ceiling triangles need to be reversed to get the correct winding
            for point in reversed(triangle):
                # Ceiling data
                # 3D vertex coords
                ceiling_data.append(point[0])
                ceiling_data.append(point[1])
                ceiling_data.append(self.ceiling_height)
                # 2D texture coords
                # Take the longest dimension as 1m
                ceiling_texture_ratio = (float(self.ceiling_texture.width) /
                                         float(self.ceiling_texture.height))
                if ceiling_texture_ratio < 1.0:
                    ceiling_texture_ratio = 1.0 / ceiling_texture_ratio
                # Apply rotation
                tex_x = (point[0] * math.cos(self.ceiling_texture_angle) -
                         point[1] * math.sin(self.ceiling_texture_angle))
                tex_y = (point[0] * math.sin(self.ceiling_texture_angle) +
                         point[1] * math.cos(self.ceiling_texture_angle))
                # Apply scale
                tex_x /= self.ceiling_texture_scale
                tex_y /= self.ceiling_texture_scale
                # Correct ratio and add to list
                ceiling_data.append(tex_x * ceiling_texture_ratio)
                ceiling_data.append(tex_y)
                # Lightmap coords
                min_x, max_x, min_y, max_y = self.bounding_box
                lm_x = (point[0] - min_x) / (max_x - min_x)
                lm_y = (point[1] - min_y) / (max_y - min_y)
                ceiling_data.extend([lm_x, lm_y])
        
        # Floor: put it in an array of GLfloats
        self.floor_data_count = len(floor_data) / 7
        floor_data = (GLfloat * len(floor_data))(*floor_data)
        # Upload the data to the VBO
        glBindBuffer(GL_ARRAY_BUFFER, self.floor_data_vbo)
        glBufferData(GL_ARRAY_BUFFER, sizeof(floor_data), floor_data,
                     GL_STATIC_DRAW)

        # Ceiling: put it in an array of GLfloats
        self.ceiling_data_count = len(ceiling_data) / 7
        ceiling_data = (GLfloat * len(ceiling_data))(*ceiling_data)
        # Upload the data to the VBO
        glBindBuffer(GL_ARRAY_BUFFER, self.ceiling_data_vbo)
        glBufferData(GL_ARRAY_BUFFER, sizeof(ceiling_data), ceiling_data,
                     GL_STATIC_DRAW)
        
        # Now the walls. If the texture is allowed to wrap around corners, we
        # need to know the total wall length first.
        self.lightmaps = []
        self.generate_floor_and_ceiling_lightmaps()
        self.generate_wall_lightmap()
        if self.wall_texture_fit == WALL_TEXTURE_FIT_OVERALL:
            total_wall_length = 0.0
            for wall in self.walls:
                total_wall_length += utils.get_length(wall[0], wall[1])
            # Also keep track of the length of wall covered
            wall_covered = 0.0
        
        wall_data = []
        # Triangulate each wall
        for i, wall in enumerate(self.walls):
            # Shared walls might need to draw wall above and/or below the
            # other room.
            if i in self.shared_walls:
                other = self.shared_walls[i]
                # Wall above the opening?
                if other.ceiling_height < self.ceiling_height:
                    quad_data = self.get_wall_triangle_data(
                                   i, other.ceiling_height, self.ceiling_height)
                    wall_data.extend(quad_data)
                    
                # Wall below the opening?
                if other.floor_height > self.floor_height:
                    quad_data = self.get_wall_triangle_data(
                                   i, self.floor_height, other.floor_height)
                    wall_data.extend(quad_data)
            
            else:
                quad_data = self.get_wall_triangle_data(i)
                wall_data.extend(quad_data)
        
        # Wall: put it in an array of GLfloats
        self.wall_data_count = len(wall_data) // 5
        wall_data = (GLfloat * len(wall_data))(*wall_data)
        # Upload the data to the VBO
        glBindBuffer(GL_ARRAY_BUFFER, self.wall_data_vbo)
        glBufferData(GL_ARRAY_BUFFER, sizeof(wall_data), wall_data,
                     GL_STATIC_DRAW)            
Example no. 20
    def generate_wall_tex_coords(self):
        """For each point along the walls, generate texture coordinates and
        lightmap coordinates.
        
        """
        # Y coords for textures
        self.wall_texture_floor_height = 0.0
        self.wall_texture_ceiling_height = 1.0 / self.wall_texture_scale
        self.wall_lightmap_floor_height = 0.0
        self.wall_lightmap_ceiling_height = 1.0
        
        self.wall_texture_vertices = []
        self.wall_lightmap_vertices = []
        
        # Always start at the left hand side
        self.wall_texture_vertices.append(0.0)
        self.wall_lightmap_vertices.append(0.0)
        
        # Add up the lengths of all the walls
        total_wall_length = 0.0
        for wall in self.walls:
            total_wall_length += utils.get_length(wall[0], wall[1])

        # Room height used to work out how many times we need to repeat
        # the texture
        room_height = self.ceiling_height - self.floor_height

        # Keep track of the length of wall covered
        wall_covered = 0.0
        for i, wall in enumerate(self.walls):
            # Get the texture x coords
            wall_length = utils.get_length(wall[0], wall[1])
            wall_covered += wall_length
            
            if self.wall_texture_fit == WALL_TEXTURE_FIT_PER_WALL:
                # The ratio of wall length to room height determines how many
                # times the texture repeats.
                repeat_count = round(wall_length / room_height)
                
                # Account for the texture's dimensions
                repeat_count *= (float(self.wall_texture.height) /
                                 float(self.wall_texture.width))
                repeat_count /= self.wall_texture_scale
                if repeat_count < 1.0:
                    repeat_count = 1.0
                tex_coord = repeat_count
            elif self.wall_texture_fit == WALL_TEXTURE_FIT_OVERALL:
                # The ratio of total wall length to room height determines how
                # many times the texture repeats.
                repeat_count = round(total_wall_length / room_height)
                
                # Account for the texture's dimensions
                repeat_count *= (float(self.wall_texture.height) /
                                 float(self.wall_texture.width))
                repeat_count /= self.wall_texture_scale
                if repeat_count < 1.0:
                    repeat_count = 1.0
                tex_coord = ((wall_covered / total_wall_length) * repeat_count)
            else:
                raise ValueError("Unknown texture fit value: %s"
                                 % self.wall_texture_fit)
    
            self.wall_texture_vertices.append(tex_coord)
            
            # Get lightmap coords
            self.wall_lightmap_vertices.append(wall_covered / total_wall_length)
Example no. 21
                                                              "author": ctx.author.name}))

        if current.is_stream:
            return await ctx.send(ctx.t('commands.seek.isStream', {"emoji": self.disco.emoji["false"],
                                                                   "author": ctx.author.name}))

        if not (position_ := SEEK_RX.match(position)):
            raise commands.errors.UserInputError

        hours, minutes, seconds = sorted(position_.groups(), key=lambda x: x is not None)
        to_skip = timedelta(hours=int(hours.lstrip('0') or 0) if hours else 0,
                            minutes=int(minutes.lstrip('0') or 0) if minutes else 0,
                            seconds=int(seconds.lstrip('0') or 0) if seconds else 0)

        if position.startswith('-'):
            position = max(ctx.player.position - to_skip.total_seconds() * 1000, 0)
        elif position.startswith('+'):
            position = min(ctx.player.position + to_skip.total_seconds() * 1000, current.length)
        else:
            position = min(to_skip.total_seconds() * 1000, current.length)

        await ctx.player.seek(position)
        await ctx.send(ctx.t('commands.seek.success', {"emoji": self.disco.emoji["true"],
                                                       "author": ctx.author.name,
                                                       "track": current.title,
                                                       "position": get_length(position)}))


def setup(disco):
    disco.add_cog(Music(disco))
Example no. 22
    async def _play(self, ctx, *, query):
        if not web_url(query):
            query = f'ytsearch:{query}'

        results = await self.disco.wavelink.get_tracks(query)
        if not results:
            return await ctx.send(ctx.t('commands.play.noResults', {"author": ctx.author.name,
                                                                    "emoji": self.disco.emoji["false"]}))

        player = ctx.player

        if hasattr(results, 'tracks'):
            total_length = 0
            tracks = results.tracks[:1500 - player.size]
            for track in tracks:
                total_length += 0 if track.is_stream else track.length
                player.queue.append(DiscoTrack(ctx.author, track.id, track.info))

            name = results.data['playlistInfo']['name']
            await ctx.send(ctx.t('commands.play.playlistAdded', {"playlist": name,
                                                                 "emoji": self.disco.emoji["plus"],
                                                                 "length": get_length(total_length),
                                                                 "added": len(tracks)}))
        else:
            if len(results) == 1:
                track = results[0]

                player.queue.append(DiscoTrack(ctx.author, track.id, track.info))

                await ctx.send(ctx.t('commands.play.trackAdded', {"track": track,
                                                                  "emoji": self.disco.emoji["plus"],
                                                                  "length": 'LIVESTREAM' if track.is_stream else
                                                                  get_length(track.length)}))
            else:
                tracks = results[:8]
                options = '\n'.join(f'**`»`** `{i}` [**{track}**]({track.uri}) `[{get_length(track.length)}]`'
                                    for i, track in enumerate(tracks, 1))

                cancel = ctx.t('commons.exit').lower()

                em = discord.Embed(
                    colour=self.disco.color[0],
                    title=ctx.t('commands.play.chooseOne'),
                    description=options
                ).set_author(
                    name=ctx.t('commands.play.searchResults'),
                    icon_url=ctx.guild.icon_url
                ).set_thumbnail(
                    url=self.disco.user.avatar_url
                ).set_footer(
                    text=ctx.t('commands.play.typeToCancel', {
                        "value": cancel
                    })
                )

                q = await ctx.send(content=ctx.author.mention, embed=em)

                def check(m):
                    return m.channel.id == q.channel.id and m.author.id == ctx.author.id and m.content \
                        and (m.content.isdecimal() and 0 < int(m.content) <= len(tracks) or m.content.lower() == cancel)

                try:
                    a = await self.disco.wait_for('message', timeout=120, check=check)
                except Timeout:
                    a = None

                if not a or a.content.lower() == cancel:
                    return await q.delete()

                track = tracks[int(a.content) - 1]

                player.queue.append(DiscoTrack(ctx.author, track.id, track.info))

                await q.edit(content=ctx.t('commands.play.trackAdded', {"track": track,
                                                                        "emoji": self.disco.emoji["plus"],
                                                                        "length": get_length(track.length)}),
                             embed=None)

        if not player.current:
            await player.play(ctx.player.queue.pop(0))