Code Example #1
File: Prime.py  Project: shahafShuhamy/interests
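# NOTE: the original snippet omits its imports. It uses the standard-library `time`
# module plus project-local helpers (`Logger`, `ClientGet`, `XmlParser`) whose
# module paths are not shown here.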
class Prime():
    nextUpdateTag = 'NextInterestDecisionDate'
    logger = None

    def __init__(self):
        self.logger = Logger()

    """
     this method returns an HTML with Table
     API return Table with no HTML Tags
    """

    def getPrime(self):
        self.log('getting prime interest...')
        baseUrl = "https://www.boi.org.il/he/BankingSupervision/Data/_layouts/boi/handlers/WebPartHandler.aspx"
        params = {
            'wp': 'ItemsAggregator',
            'PageId': '150',
            'CqfDateFrom': '',
            'CqfDateTo': '',
            '_': '1557755447808'
        }
        # query from today's date last year up to 1 January of the current year
        last_year = str(int(time.strftime("%Y")) - 1)
        params['CqfDateFrom'] = time.strftime("%d/%m/") + last_year
        params['CqfDateTo'] = '01/01/' + time.strftime("%Y")
        # self.log("Today : " + dateToday + ", First date of the Year " + startOfYear)
        # add html and body tags to make this an HTML.
        return "<html><body>" + ClientGet(baseUrl, params) + "</body></html>"

    """
    this method return a string with a date
    date is the next interests table will be updated
    """

    def getNextUpdate(self, primeTable=None):  # primeTable is unused; made optional so callers may omit it
        self.log('getting next Update from xml...')
        xmlUrl = "https://www.boi.org.il/HE/BoiLatestPublication.xml"
        xmlParser = XmlParser()
        xmlResult = ClientGet(xmlUrl, None)
        value = xmlParser.findTagInString(xmlResult, self.nextUpdateTag)
        return value

    def log(self, message):
        self.logger.log('Prime', message)

    def TEST_Get_Next_Update(self):
        prime = Prime()
        print(prime.getNextUpdate())

    def TEST_Get_Prime_Table(self):
        prime = Prime()
        print(prime.getPrime())
Code Example #2
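# NOTE: this snippet assumes `discord` and `discord.ext.commands` are imported, and
# that the project-local `Logger`, `DatabaseHandler`, `getUserTier`,
# `isUserDeveloper` and the `COLOUR` constant are defined elsewhere.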
class Misc(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.logger = Logger(False)
        self.logger.log(name="Discord", output="Misc Cog Loaded!")
        self.db = DatabaseHandler(db="database.db")
        self.db.connect()

    @commands.command(name="ping", aliases=["pang", "peng", "pong", "pung"])
    async def _ping(self, ctx):
        ping = round(self.bot.latency * 1000)
        e = discord.Embed()
        if ping < 30:
            e.colour = 0x39fc03
        elif ping < 75:
            e.colour = 0xf7ba00
        else:
            e.colour = 0xf70000

        e.title = "Pong! :ping_pong:"
        e.description = f"API Latency: {ping}ms"

        await ctx.send(embed=e)

    @commands.command(name="tier", aliases=["rank"])
    async def _tier(self, ctx, member: discord.User = None):
        tier = ""
        em = discord.Embed()
        em.colour = COLOUR
        if member is None:
            # no member given - show the invoker's own tier
            em.title = f"{ctx.author.name}'s Tier"
            tier = getUserTier(ctx.author.id)
            em.description = f"Your current tier is: {tier}!"
            return await ctx.send(embed=em)

        em.title = f"{member.name}'s Tier"
        tier = getUserTier(member.id)
        em.description = f"{member.name}'s tier is: {tier}!"
        await ctx.send(embed=em)

    @commands.command(name="test")
    async def _test(self, ctx):
        await ctx.send(isUserDeveloper(ctx.author.id))
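
A minimal loading sketch, assuming discord.py 1.x-style extension loading (the "cogs.misc" module path is hypothetical):

def setup(bot):
    # Called by bot.load_extension("cogs.misc") to register the cog with the bot.
    bot.add_cog(Misc(bot))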
Code Example #3
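    # NOTE: this is a fragment of a command-line dispatcher. It assumes `sys`, `os`,
    # a `logger` instance, the `version_filename_flag` path and the `train`/`eval`
    # functions are defined earlier in the file, and that `application` was read
    # from an earlier sys.argv entry.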
    # module name
    if len(sys.argv) > 2:
        name = sys.argv[2]

    # gpu number
    if len(sys.argv) > 3:
        gpu = sys.argv[3]

    if application == "train":
        # check whether the required data version has been downloaded
        if not os.path.isfile(version_filename_flag):
            print(
                "Error: Data wasn't downloaded. Type python Run.py for instructions on how to download\n\n"
            )
            exit()
        logger.log("Command: Train(module_name=%s, gpu=%s)" % (name, str(gpu)))
        train(name=name, gpu=gpu)

    elif application == "eval":
        # check whether the required data version has been downloaded
        if not os.path.isfile(version_filename_flag):
            print(
                "Error: Data wasn't downloaded. Type python Run.py for instructions on how to download\n\n"
            )
            exit()

        logger.log("Command: Eval(module_name=%s, gpu=%s)" % (name, str(gpu)))
        eval(load_module_name=name, gpu=gpu)

    elif application == "download":
        logger.log("Command: Download()")
Code Example #4
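# NOTE: this snippet is Python 2 code (xrange, cPickle). It assumes the usual
# imports (os, math, inspect, numpy as np, tensorflow as tf, cPickle,
# random.shuffle) plus the project-local FilesManager, Logger, Module, test,
# NOF_PREDICATES, NOF_OBJECTS, TEST_ITERATIONS and SAVE_MODEL_ITERATIONS.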
def train(name="module",
          nof_iterations=100,
          learning_rate=0.0001,
          learning_rate_steps=1000,
          learning_rate_decay=0.5,
          load_module_name="module.ckpt",
          use_saved_module=False,
          batch_size=20,
          pred_pos_neg_ratio=10,
          lr_object_coeff=4,
          layers=[500, 500, 500],
          gpu=0):
    """
    Train SGP module given train parameters and module hyper-parameters
    :param name: name of the train session
    :param nof_iterations: number of epochs
    :param learning_rate:
    :param learning_rate_steps: decay after number of steps
    :param learning_rate_decay: the factor to decay the learning rate
    :param load_module_name: name of already trained module weights to load
    :param use_saved_module: start from already train module
    :param batch_size: number of images in each mini-batch
    :param pred_pos_neg_ratio: Set the loss ratio between positive and negatives (not labeled) predicates
    :param lr_object_coeff: Set the loss ratio between objects and predicates
    :param layers: list of sizes of the hidden layer of the predicate and object classifier
    :param gpu: gpu number to use for the training
    :return: nothing
    """
    gpi_type = "Linguistic"
    including_object = True
    # get filesmanager
    filesmanager = FilesManager()

    # create logger
    logger_path = filesmanager.get_file_path("logs")
    logger_path = os.path.join(logger_path, name)
    logger = Logger(name, logger_path)

    # print train params
    frame = inspect.currentframe()
    args, _, _, values = inspect.getargvalues(frame)
    logger.log('function name "%s"' % inspect.getframeinfo(frame)[2])
    for i in args:
        logger.log("    %s = %s" % (i, values[i]))

    # set gpu
    if gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
        logger.log("os.environ[\"CUDA_VISIBLE_DEVICES\"] = " + str(gpu))

    # create module
    module = Module(gpi_type=gpi_type,
                    nof_predicates=NOF_PREDICATES,
                    nof_objects=NOF_OBJECTS,
                    is_train=True,
                    learning_rate=learning_rate,
                    learning_rate_steps=learning_rate_steps,
                    learning_rate_decay=learning_rate_decay,
                    lr_object_coeff=lr_object_coeff,
                    including_object=including_object,
                    layers=layers)

    ##
    # get module place holders
    #
    # get input place holders
    confidence_relation_ph, confidence_entity_ph, bb_ph, word_embed_relations_ph, word_embed_entities_ph = module.get_in_ph(
    )
    # get labels place holders
    labels_relation_ph, labels_entity_ph, labels_coeff_loss_ph = module.get_labels_ph(
    )
    # get loss and train step
    loss, gradients, grad_placeholder, train_step = module.get_module_loss()

    ##
    # get module output
    out_relation_probes, out_entity_probes = module.get_output()

    # Initialize the Computational Graph
    init = tf.global_variables_initializer()
    # Add ops to save and restore all the variables.
    variables = tf.contrib.slim.get_variables_to_restore()
    variables_to_restore = variables
    saver = tf.train.Saver(variables_to_restore)

    with tf.Session() as sess:
        # Restore variables from disk.
        module_path = filesmanager.get_file_path("sg_module.train.saver")
        module_path_load = os.path.join(module_path, load_module_name)
        if os.path.exists(module_path_load + ".index") and use_saved_module:
            saver.restore(sess, module_path_load)
            logger.log("Model restored.")
        else:
            sess.run(init)

        # train images
        vg_train_path = filesmanager.get_file_path("data.visual_genome.train")
        # list of train files
        train_files_list = range(2, 72)
        shuffle(train_files_list)

        # The full validation set is 5 files.
        # After tuning the hyper-parameters, use just 2 files for early stopping.
        validation_files_list = range(2)

        # create one hot vector for predicate_negative (i.e. not labeled)
        relation_neg = np.zeros(NOF_PREDICATES)
        relation_neg[NOF_PREDICATES - 1] = 1

        # object embedding
        embed_obj = FilesManager().load_file(
            "language_module.word2vec.object_embeddings")
        embed_pred = FilesManager().load_file(
            "language_module.word2vec.predicate_embeddings")
        embed_pred = np.concatenate(
            (embed_pred, np.zeros(embed_pred[:1].shape)),
            axis=0)  # concatenate the negative-relation representation

        # train module
        lr = learning_rate
        best_test_loss = -1
        baseline_path = filesmanager.get_file_path(
            "data.visual_genome.train_baseline")
        for epoch in xrange(1, nof_iterations):
            accum_results = None
            total_loss = 0
            steps = []
            # read data
            file_index = -1
            for file_name in train_files_list:

                file_index += 1

                # load data from file
                file_path = os.path.join(vg_train_path, str(file_name) + ".p")
                file_handle = open(file_path, "rb")
                train_images = cPickle.load(file_handle)
                file_handle.close()
                shuffle(train_images)

                for image in train_images:
                    # load initial belief by baseline detector
                    file_path = os.path.join(baseline_path,
                                             str(image.image.id) + ".p")
                    if not os.path.exists(file_path):
                        continue
                    file_handle = open(file_path, "rb")
                    detector_data = cPickle.load(file_handle)
                    file_handle.close()
                    image.predicates_outputs_with_no_activation = detector_data[
                        "rel_dist_mapped"]
                    image.objects_outputs_with_no_activations = detector_data[
                        "obj_dist_mapped"]

                    # set diagonal to be negative predicate (no relation for a single object)
                    indices = np.arange(
                        image.predicates_outputs_with_no_activation.shape[0])
                    image.predicates_outputs_with_no_activation[
                        indices, indices, :] = relation_neg
                    image.predicates_labels[indices, indices, :] = relation_neg

                    # spatial features
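                    # 14 hand-crafted features per entity: columns 0-3 are the box
                    # corners normalized by 1200 (assumed maximum image dimension);
                    # columns 4-9 start as x, -x, y, -y, area, -area and are then
                    # overwritten below with np.argsort orderings; columns 10-11 are
                    # argsort orderings of the max detector confidence; columns
                    # 12-13 hold the entity index and its reverse.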
                    entity_bb = np.zeros((len(image.objects), 14))
                    for obj_id in range(len(image.objects)):
                        entity_bb[obj_id][0] = image.objects[obj_id].x / 1200.0
                        entity_bb[obj_id][1] = image.objects[obj_id].y / 1200.0
                        entity_bb[obj_id][2] = (
                            image.objects[obj_id].x +
                            image.objects[obj_id].width) / 1200.0
                        entity_bb[obj_id][3] = (
                            image.objects[obj_id].y +
                            image.objects[obj_id].height) / 1200.0
                        entity_bb[obj_id][4] = image.objects[obj_id].x
                        entity_bb[obj_id][5] = -1 * image.objects[obj_id].x
                        entity_bb[obj_id][6] = image.objects[obj_id].y
                        entity_bb[obj_id][7] = -1 * image.objects[obj_id].y
                        entity_bb[obj_id][8] = image.objects[
                            obj_id].width * image.objects[obj_id].height
                        entity_bb[obj_id][9] = -1 * image.objects[
                            obj_id].width * image.objects[obj_id].height
                    entity_bb[:, 4] = np.argsort(entity_bb[:, 4])
                    entity_bb[:, 5] = np.argsort(entity_bb[:, 5])
                    entity_bb[:, 6] = np.argsort(entity_bb[:, 6])
                    entity_bb[:, 7] = np.argsort(entity_bb[:, 7])
                    entity_bb[:, 8] = np.argsort(entity_bb[:, 8])
                    entity_bb[:, 9] = np.argsort(entity_bb[:, 9])
                    entity_bb[:, 10] = np.argsort(
                        np.max(image.objects_outputs_with_no_activations,
                               axis=1))
                    entity_bb[:, 11] = np.argsort(-1 * np.max(
                        image.objects_outputs_with_no_activations, axis=1))
                    entity_bb[:, 12] = np.arange(entity_bb.shape[0])
                    entity_bb[:, 13] = np.arange(entity_bb.shape[0], 0, -1)

                    # skip images with no positive relation labels
                    relations_neg_labels = image.predicates_labels[:, :,
                                                                   NOF_PREDICATES
                                                                   - 1:]
                    if np.sum(image.predicates_labels[:, :, :NOF_PREDICATES -
                                                      1]) == 0:
                        continue

                    if including_object:
                        in_entity_confidence = image.objects_outputs_with_no_activations
                    else:
                        in_entity_confidence = image.objects_labels * 1000

                    # give lower weight to negatives
                    coeff_factor = np.ones(relations_neg_labels.shape)
                    factor = float(
                        np.sum(
                            image.predicates_labels[:, :, :NOF_PREDICATES - 2])
                    ) / np.sum(relations_neg_labels) / pred_pos_neg_ratio
                    coeff_factor[relations_neg_labels == 1] *= factor

                    coeff_factor[indices, indices] = 0

                    # create the feed dictionary
                    feed_dict = {
                        confidence_relation_ph:
                        image.predicates_outputs_with_no_activation,
                        confidence_entity_ph: in_entity_confidence,
                        bb_ph: entity_bb,
                        module.phase_ph: True,
                        word_embed_entities_ph: embed_obj,
                        word_embed_relations_ph: embed_pred,
                        labels_relation_ph: image.predicates_labels,
                        labels_entity_ph: image.objects_labels,
                        labels_coeff_loss_ph: coeff_factor.reshape((-1)),
                        module.lr_ph: lr
                    }

                    # run the network
                    out_relation_probes_val, out_entity_probes_val, loss_val, gradients_val = \
                        sess.run([out_relation_probes, out_entity_probes, loss, gradients],
                                 feed_dict=feed_dict)
                    if math.isnan(loss_val):
                        print("NAN")
                        continue

                    # set diagonal to be neg (in order not to take into account in statistics)
                    out_relation_probes_val[indices, indices, :] = relation_neg

                    # append gradient to list (will be applied as a batch of entities)
                    steps.append(gradients_val)

                    # statistic
                    total_loss += loss_val

                    results = test(image.predicates_labels,
                                   image.objects_labels,
                                   out_relation_probes_val,
                                   out_entity_probes_val)

                    # accumulate results
                    if accum_results is None:
                        accum_results = results
                    else:
                        for key in results:
                            accum_results[key] += results[key]

                    if len(steps) == batch_size:
                        # apply steps
                        step = steps[0]
                        feed_grad_apply_dict = {
                            grad_placeholder[j][0]: step[j][0]
                            for j in xrange(len(grad_placeholder))
                        }
                        for i in xrange(1, len(steps)):
                            step = steps[i]
                            for j in xrange(len(grad_placeholder)):
                                feed_grad_apply_dict[grad_placeholder[j]
                                                     [0]] += step[j][0]

                        feed_grad_apply_dict[module.lr_ph] = lr
                        sess.run([train_step], feed_dict=feed_grad_apply_dict)
                        steps = []
                # print per-file statistics (first epoch only)
                if epoch == 1:
                    obj_accuracy = float(accum_results['entity_correct']
                                         ) / accum_results['entity_total']
                    predicate_pos_accuracy = float(
                        accum_results['relations_pos_correct']
                    ) / accum_results['relations_pos_total']
                    relationships_pos_accuracy = float(
                        accum_results['relationships_pos_correct']
                    ) / accum_results['relations_pos_total']
                    logger.log(
                        "iter %d.%d - obj %f - pred %f - relation %f" %
                        (epoch, file_index, obj_accuracy,
                         predicate_pos_accuracy, relationships_pos_accuracy))

            # print stat per epoch
            obj_accuracy = float(accum_results['entity_correct']
                                 ) / accum_results['entity_total']
            predicate_pos_accuracy = float(
                accum_results['relations_pos_correct']
            ) / accum_results['relations_pos_total']
            predicate_all_accuracy = float(accum_results['relations_correct']
                                           ) / accum_results['relations_total']
            relationships_pos_accuracy = float(
                accum_results['relationships_pos_correct']
            ) / accum_results['relations_pos_total']
            relationships_all_accuracy = float(
                accum_results['relationships_correct']
            ) / accum_results['relations_total']

            logger.log(
                "iter %d - loss %f - obj %f - pred %f - rela %f - all_pred %f - all rela %f - lr %f"
                % (epoch, total_loss, obj_accuracy, predicate_pos_accuracy,
                   relationships_pos_accuracy, predicate_all_accuracy,
                   relationships_all_accuracy, lr))

            # run validation
            if epoch % TEST_ITERATIONS == 0:
                total_test_loss = 0
                accum_test_results = None

                for file_name in validation_files_list:
                    # load data from file
                    file_path = os.path.join(vg_train_path,
                                             str(file_name) + ".p")
                    file_handle = open(file_path, "rb")
                    validation_images = cPickle.load(file_handle)
                    file_handle.close()

                    for image in validation_images:
                        file_path = os.path.join(baseline_path,
                                                 str(image.image.id) + ".p")
                        if not os.path.exists(file_path):
                            continue
                        file_handle = open(file_path, "rb")
                        detector_data = cPickle.load(file_handle)
                        file_handle.close()

                        image.predicates_outputs_with_no_activation = detector_data[
                            "rel_dist_mapped"]
                        image.objects_outputs_with_no_activations = detector_data[
                            "obj_dist_mapped"]
                        # set diagonal to be neg
                        indices = np.arange(
                            image.predicates_outputs_with_no_activation.
                            shape[0])
                        image.predicates_outputs_with_no_activation[
                            indices, indices, :] = relation_neg
                        image.predicates_labels[indices,
                                                indices, :] = relation_neg

                        # get shape of extended object to be used by the module
                        extended_confidence_object_shape = np.asarray(
                            image.predicates_outputs_with_no_activation.shape)
                        extended_confidence_object_shape[2] = NOF_OBJECTS

                        # spatial features
                        entity_bb = np.zeros((len(image.objects), 14))
                        for obj_id in range(len(image.objects)):
                            entity_bb[obj_id][
                                0] = image.objects[obj_id].x / 1200.0
                            entity_bb[obj_id][
                                1] = image.objects[obj_id].y / 1200.0
                            entity_bb[obj_id][2] = (
                                image.objects[obj_id].x +
                                image.objects[obj_id].width) / 1200.0
                            entity_bb[obj_id][3] = (
                                image.objects[obj_id].y +
                                image.objects[obj_id].height) / 1200.0
                            entity_bb[obj_id][4] = image.objects[obj_id].x
                            entity_bb[obj_id][5] = -1 * image.objects[obj_id].x
                            entity_bb[obj_id][6] = image.objects[obj_id].y
                            entity_bb[obj_id][7] = -1 * image.objects[obj_id].y
                            entity_bb[obj_id][8] = image.objects[
                                obj_id].width * image.objects[obj_id].height
                            entity_bb[obj_id][9] = -1 * image.objects[
                                obj_id].width * image.objects[obj_id].height
                        entity_bb[:, 4] = np.argsort(entity_bb[:, 4])
                        entity_bb[:, 5] = np.argsort(entity_bb[:, 5])
                        entity_bb[:, 6] = np.argsort(entity_bb[:, 6])
                        entity_bb[:, 7] = np.argsort(entity_bb[:, 7])
                        entity_bb[:, 8] = np.argsort(entity_bb[:, 8])
                        entity_bb[:, 9] = np.argsort(entity_bb[:, 9])
                        entity_bb[:, 10] = np.argsort(
                            np.max(image.objects_outputs_with_no_activations,
                                   axis=1))
                        entity_bb[:, 11] = np.argsort(-1 * np.max(
                            image.objects_outputs_with_no_activations, axis=1))
                        entity_bb[:, 12] = np.arange(entity_bb.shape[0])
                        entity_bb[:, 13] = np.arange(entity_bb.shape[0], 0, -1)

                        # skip images with no positive relation labels
                        relations_neg_labels = image.predicates_labels[:, :,
                                                                       NOF_PREDICATES
                                                                       - 1:]
                        if np.sum(
                                image.predicates_labels[:, :, :NOF_PREDICATES -
                                                        1]) == 0:
                            continue

                        # give lower weight to negatives
                        coeff_factor = np.ones(relations_neg_labels.shape)
                        factor = float(
                            np.sum(
                                image.predicates_labels[:, :, :NOF_PREDICATES -
                                                        2])
                        ) / np.sum(relations_neg_labels) / pred_pos_neg_ratio
                        coeff_factor[relations_neg_labels == 1] *= factor
                        coeff_factor[indices, indices] = 0
                        coeff_factor[relations_neg_labels == 1] = 0

                        if including_object:
                            in_entity_confidence = image.objects_outputs_with_no_activations
                        else:
                            in_entity_confidence = image.objects_labels * 1000

                        # create the feed dictionary
                        feed_dict = {
                            confidence_relation_ph:
                            image.predicates_outputs_with_no_activation,
                            confidence_entity_ph: in_entity_confidence,
                            module.entity_bb_ph: entity_bb,
                            module.word_embed_entities_ph: embed_obj,
                            module.phase_ph: False,
                            module.word_embed_relations_ph: embed_pred,
                            labels_relation_ph: image.predicates_labels,
                            labels_entity_ph: image.objects_labels,
                            labels_coeff_loss_ph: coeff_factor.reshape((-1))
                        }

                        # run the network
                        out_relation_probes_val, out_entity_probes_val, loss_val = sess.run(
                            [out_relation_probes, out_entity_probes, loss],
                            feed_dict=feed_dict)

                        # set diagonal to be neg (in order not to take into account in statistics)
                        out_relation_probes_val[indices,
                                                indices, :] = relation_neg

                        # statistic
                        total_test_loss += loss_val

                        # statistics
                        results = test(image.predicates_labels,
                                       image.objects_labels,
                                       out_relation_probes_val,
                                       out_entity_probes_val)

                        # accumulate results
                        if accum_test_results is None:
                            accum_test_results = results
                        else:
                            for key in results:
                                accum_test_results[key] += results[key]

                # print stat
                obj_accuracy = float(accum_test_results['entity_correct']
                                     ) / accum_test_results['entity_total']
                predicate_pos_accuracy = float(
                    accum_test_results['relations_pos_correct']
                ) / accum_test_results['relations_pos_total']
                predicate_all_accuracy = float(
                    accum_test_results['relations_correct']
                ) / accum_test_results['relations_total']
                relationships_pos_accuracy = float(accum_test_results['relationships_pos_correct']) / \
                                             accum_test_results[
                                                 'relations_pos_total']
                relationships_all_accuracy = float(
                    accum_test_results['relationships_correct']
                ) / accum_test_results['relations_total']

                logger.log(
                    "VALIDATION - loss %f - obj %f - pred %f - rela %f - all_pred %f - all rela %f"
                    % (total_test_loss, obj_accuracy, predicate_pos_accuracy,
                       relationships_pos_accuracy, predicate_all_accuracy,
                       relationships_all_accuracy))

                # save best module so far
                if best_test_loss == -1 or total_test_loss < best_test_loss:
                    module_path_save = os.path.join(module_path,
                                                    name + "_best_module.ckpt")
                    save_path = saver.save(sess, module_path_save)
                    logger.log("Model saved in file: %s" % save_path)
                    best_test_loss = total_test_loss

            # save module
            if epoch % SAVE_MODEL_ITERATIONS == 0:
                module_path_save = os.path.join(module_path,
                                                name + "_module.ckpt")
                save_path = saver.save(sess, module_path_save)
                logger.log("Model saved in file: %s" % save_path)

            # learning rate decay
            if (epoch % learning_rate_steps) == 0:
                lr *= learning_rate_decay
Code Example #5
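# NOTE: this snippet assumes `discord`, `discord.ext.commands` and `lavalink`
# (lavalink.py) are imported, that `url_rx` is a compiled URL regex defined at
# module level, and that `Logger` is a project-local helper.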
class Music(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

        if not hasattr(bot, 'lavalink'):  # This ensures the client isn't overwritten during cog reloads.
            bot.lavalink = lavalink.Client(bot.user.id)
            bot.lavalink.add_node('avexlava.herokuapp.com', 80, 'youshallnotpass', 'us', 'default-node')  # Host, Port, Password, Region, Name
            bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')

        lavalink.add_event_hook(self.track_hook)

        self.logger = Logger(False)
        self.logger.log(name="Discord", output='Music Cog Loaded!')

    def cog_unload(self):
        """ Cog unload handler. This removes any event hooks that were registered. """
        self.bot.lavalink._event_hooks.clear()

    async def cog_before_invoke(self, ctx):
        """ Command before-invoke handler. """
        guild_check = ctx.guild is not None
        #  This is essentially the same as `@commands.guild_only()`
        #  except it saves us repeating ourselves (and also a few lines).

        if guild_check:
            await self.ensure_voice(ctx)
            #  Ensure that the bot and command author share a mutual voicechannel.

        return guild_check

    async def cog_command_error(self, ctx, error):
        if isinstance(error, commands.CommandInvokeError):
            await ctx.send(error.original)
            # The above handles errors thrown in this cog and shows them to the user.
            # This shouldn't be a problem as the only errors thrown in this cog are from `ensure_voice`
            # which contain a reason string, such as "Join a voicechannel" etc. You can modify the above
            # if you want to do things differently.

    async def ensure_voice(self, ctx):
        """ This check ensures that the bot and command author are in the same voicechannel. """
        player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
        # Create returns a player if one exists, otherwise creates.
        # This line is important because it ensures that a player always exists for a guild.

        # Most people might consider this a waste of resources for guilds that aren't playing, but this is
        # the easiest and simplest way of ensuring players are created.

        # These are commands that require the bot to join a voicechannel (i.e. initiating playback).
        # Commands such as volume/skip etc don't require the bot to be in a voicechannel so don't need listing here.
        should_connect = ctx.command.name in ('play',)

        if not ctx.author.voice or not ctx.author.voice.channel:
            # Our cog_command_error handler catches this and sends it to the voicechannel.
            # Exceptions allow us to "short-circuit" command invocation via checks so the
            # execution state of the command goes no further.
            raise commands.CommandInvokeError('Join a voicechannel first.')

        if not player.is_connected:
            if not should_connect:
                raise commands.CommandInvokeError('Not connected.')

            permissions = ctx.author.voice.channel.permissions_for(ctx.me)

            if not permissions.connect or not permissions.speak:  # Check user limit too?
                raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions.')

            player.store('channel', ctx.channel.id)
            await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
        else:
            if int(player.channel_id) != ctx.author.voice.channel.id:
                raise commands.CommandInvokeError('You need to be in my voicechannel.')

    async def track_hook(self, event):
        if isinstance(event, lavalink.events.QueueEndEvent):
            # When this track_hook receives a "QueueEndEvent" from lavalink.py
            # it indicates that there are no tracks left in the player's queue.
            # To save on resources, we can tell the bot to disconnect from the voicechannel.
            guild_id = int(event.player.guild_id)
            await self.connect_to(guild_id, None)

    async def connect_to(self, guild_id: int, channel_id: str):
        """ Connects to the given voicechannel ID. A channel_id of `None` means disconnect. """
        ws = self.bot._connection._get_websocket(guild_id)
        await ws.voice_state(str(guild_id), channel_id)
        # The above looks dirty, we could alternatively use `bot.shards[shard_id].ws` but that assumes
        # the bot instance is an AutoShardedBot.

    @commands.command(aliases=['p'])
    async def play(self, ctx, *, query: str):
        """ Searches and plays a song from a given query. """
        # Get the player for this guild from cache.
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)
        # Remove leading and trailing <>. <> may be used to suppress embedding links in Discord.
        query = query.strip('<>')

        # Check if the user input might be a URL. If it isn't, we can have Lavalink do a YouTube search for it instead.
        # SoundCloud searching is possible by prefixing "scsearch:" instead.
        if not url_rx.match(query):
            query = f'ytsearch:{query}'

        # Get the results for the query from Lavalink.
        results = await player.node.get_tracks(query)

        # Results could be None if Lavalink returns an invalid response (non-JSON/non-200 (OK)).
        # Alternatively, results['tracks'] could be an empty array if the query yielded no tracks.
        if not results or not results['tracks']:
            return await ctx.send('Nothing found!')

        embed = discord.Embed(color=discord.Color.blurple())

        # Valid loadTypes are:
        #   TRACK_LOADED    - single video/direct URL
        #   PLAYLIST_LOADED - direct URL to a playlist
        #   SEARCH_RESULT   - query prefixed with either ytsearch: or scsearch:
        #   NO_MATCHES      - query yielded no results
        #   LOAD_FAILED     - most likely, the video encountered an exception during loading.
        if results['loadType'] == 'PLAYLIST_LOADED':
            tracks = results['tracks']

            for track in tracks:
                # Add all of the tracks from the playlist to the queue.
                player.add(requester=ctx.author.id, track=track)

            embed.title = 'Playlist Enqueued!'
            embed.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks'
        else:
            track = results['tracks'][0]
            embed.title = 'Track Enqueued'
            embed.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'

            # You can attach additional information to audiotracks through kwargs, however this involves
            # constructing the AudioTrack class yourself.
            track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
            player.add(requester=ctx.author.id, track=track)

        await ctx.send(embed=embed)

        # We don't want to call .play() if the player is playing as that will effectively skip
        # the current track.
        if not player.is_playing:
            await player.play()

    @commands.command(aliases=['dc'])
    async def disconnect(self, ctx):
        """ Disconnects the player from the voice channel and clears its queue. """
        player = self.bot.lavalink.player_manager.get(ctx.guild.id)

        if not player.is_connected:
            # We can't disconnect, if we're not connected.
            return await ctx.send('Not connected.')

        if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
            # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot
            # may not disconnect the bot.
            return await ctx.send('You\'re not in my voicechannel!')

        # Clear the queue to ensure old tracks don't start playing
        # when someone else queues something.
        player.queue.clear()
        # Stop the current track so Lavalink consumes less resources.
        await player.stop()
        # Disconnect from the voice channel.
        await self.connect_to(ctx.guild.id, None)
        await ctx.send('*⃣ | Disconnected.')
Code Example #6
File: Gamer.py  Project: rik7821/JJGamer
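# NOTE: this snippet assumes `cv2` is imported as `cv`, that `path` and `remove`
# come from the `os` module, and that `Device`, `Chess`, `PlayGround` and `Logger`
# are project-local classes.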
class Gamer(object):
    def __init__(self):
        self.device = Device()
        self.chess = Chess(path.join('Images', 'chess.png'))
        self.play_ground = PlayGround()
        self.logger = Logger('Log/')

    def __scan(self):
        pos = 0
        for i in range(500, 1000):
            res = []
            for j in range(len(self.play_ground.raw_png[i]) - 1):
                # ignore columns covered by the chess piece
                if self.chess.top_left[0] <= j <= self.chess.bottom_right[0]:
                    res.append(0)
                else:
                    prev_color = int(self.play_ground.raw_png[i][j])
                    next_color = int(self.play_ground.raw_png[i][j + 1])
                    diff_color = abs(prev_color - next_color)
                    if diff_color < 2:
                        diff_color = 0
                    res.append(diff_color)
            if sum(res) >= 10:
                cnt = 0
                for j in range(len(res)):
                    if res[j] > 0:
                        cnt += 1
                        pos += j
                pos //= cnt
                break
        return pos

    def __distance(self, x_dest: int):
        x_src_left = self.chess.top_left[0]
        x_src_right = self.chess.bottom_right[0]
        x_src = (x_src_right - x_src_left) // 2 + x_src_left
        x_diff = abs(x_dest - x_src)
        return x_diff

    def play(self):
        # get screen cap
        screencap_filename = self.device.get_screencap()
        # update raw png data
        self.play_ground.read_png(screencap_filename)
        self.chess.update_loc(self.play_ground)

        pos = self.__scan()
        dist = self.__distance(pos)

        # save data
        playground_img = self.play_ground.raw_png
        cv.rectangle(playground_img, self.chess.top_left,
                     self.chess.bottom_right, 128, 3)
        cv.line(playground_img, (pos, 0), (pos, 1920), 0, 3)
        screencap_img = cv.imread(screencap_filename)
        self.logger.log(playground_img, screencap_img)

        # move
        self.device.press(self.device.coef * dist)

        # remove temp screen shot
        remove(screencap_filename)
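
A minimal usage sketch, assuming a device is reachable through the project's Device wrapper (screen capture and touch input are handled there):

if __name__ == '__main__':
    gamer = Gamer()
    gamer.play()  # one capture -> scan -> jump cycle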
Code Example #7
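# NOTE: this snippet assumes pathlib.Path, pickle and the sklearn.metrics scorers
# (accuracy_score, f1_score, precision_score, recall_score) are imported, plus the
# project-local Logger, parameter_search, objective_function_veracity_branchLSTM
# and evaluation_function_veracity_branchLSTM helpers.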
def train(data_path, params_path, log_path, log_name, HPsearch):

    data_path = Path(data_path)
    params_path = Path(params_path)
    log_path = Path(log_path)

    logger = Logger(log_path, log_name)

    if HPsearch:
        ntrials = 100
        paramsB, trialsB = parameter_search(
            ntrials, objective_function_veracity_branchLSTM, data_path)

    else:
        trialsB = pickle.load(open(params_path / 'trials_veracity.txt', "rb"))
        paramsB = pickle.load(
            open(params_path / 'bestparams_veracity.txt', "rb"))

    best_trial_idB = trialsB.best_trial["tid"]
    best_trial_lossB = trialsB.best_trial["result"]["loss"]
    dev_result_idB = trialsB.attachments["ATTACH::%d::ID" % best_trial_idB]
    dev_result_predictionsB = trialsB.attachments["ATTACH::%d::Predictions" %
                                                  best_trial_idB]
    dev_result_labelB = trialsB.attachments["ATTACH::%d::Labels" %
                                            best_trial_idB]

    print(accuracy_score(dev_result_labelB, dev_result_predictionsB))
    print(f1_score(dev_result_labelB, dev_result_predictionsB,
                   average='macro'))

    metafeatures_combinations = [
        ["stance"],  # stance = S
        ["social_interest", "user_information"],  # Metadata = MD
        ["cosine_similarity"],  # Semantic content = SC
        ["stance", "social_interest", "user_information"],  # S + MD
        ["cosine_similarity", "social_interest",
         "user_information"],  # SC + MD
        ["cosine_similarity", "stance"],  # SC + S
        ["cosine_similarity", "social_interest", "user_information",
         "stance"]  # FULL MODEL
    ]

    # Run the model with the different metafeature combinations and early-stopping settings
    for Early_Stopping in [False, True]:
        for metac_idx in range(len(metafeatures_combinations)):
            if metac_idx in [0, 1, 3]:
                embeddings_present = False
            else:
                embeddings_present = True
            print("Commencing training with the combination: {}".format(
                metafeatures_combinations[metac_idx]))
            print("Using Embeddings: ", embeddings_present)
            if Early_Stopping:
                paramsB["num_epochs"] = 64
            test_result_idB, test_result_predictionsB, test_result_labelB, confidenceB, mactest_F = evaluation_function_veracity_branchLSTM(
                data_path,
                paramsB,
                metafeatures_combinations[metac_idx],
                use_embeddings=embeddings_present,
                Early_Stopping=Early_Stopping)
            precision = precision_score(test_result_labelB,
                                        test_result_predictionsB,
                                        average='macro')
            recall = recall_score(test_result_labelB,
                                  test_result_predictionsB,
                                  average='macro')
            message = "Combination: {}\nUsing Embeddings: {}\nWith EarlyStopping: {}\nResulted in: \nPrecision: {}\nRecall: {}\nF1: {}\nAccuracy: {}\n\n".format(
                metafeatures_combinations[metac_idx], embeddings_present,
                Early_Stopping, precision, recall, mactest_F,
                accuracy_score(test_result_labelB, test_result_predictionsB))
            logger.log(message)
Code Example #8
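# NOTE: like the train() example above, this is Python 2 code (cPickle). It
# assumes os, time, inspect, numpy as np and tensorflow as tf are imported, plus
# the project-local FilesManager, Logger, Module, test, eval_image,
# NOF_PREDICATES and NOF_OBJECTS.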
def eval(load_module_name=None, k=100, layers=[500, 500, 500], gpu=1):
    """
    Evaluate module:
    - Scene Graph Classification - R@k metric (measures the fraction of ground-truth relationship
      triplets that appear among the k most confident triplet predictions in an image)
    :param load_module_name: name of the module to load
    :param k: see description
    :param layers: hidden layers of relation and entity classifier
    :param gpu: gpu number to use
    :return: nothing - output to logger instead
    """
    gpi_type = "Linguistic"
    k_recall = True
    filesmanager = FilesManager()
    # create logger
    logger = Logger()

    # print eval params
    frame = inspect.currentframe()
    args, _, _, values = inspect.getargvalues(frame)
    logger.log('function name "%s"' % inspect.getframeinfo(frame)[2])
    for i in args:
        logger.log("    %s = %s" % (i, values[i]))

    # set gpu
    if gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
        logger.log("os.environ[\"CUDA_VISIBLE_DEVICES\"] = " + str(gpu))

    # create module
    module = Module(gpi_type=gpi_type, nof_predicates=NOF_PREDICATES, nof_objects=NOF_OBJECTS,
                    is_train=False, layers=layers, including_object=True)

    # get input place holders
    confidence_relation_ph, confidence_entity_ph, bb_ph, word_embed_relations_ph, word_embed_entities_ph = module.get_in_ph()
    # get module output
    out_relation_probes, out_entity_probes = module.get_output()

    # Initialize the Computational Graph
    init = tf.global_variables_initializer()
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    # read data
    entities_path = filesmanager.get_file_path("data.visual_genome.test")

    test_files_list = range(35)

    # embeddings
    embed_obj = FilesManager().load_file("language_module.word2vec.object_embeddings")
    embed_pred = FilesManager().load_file("language_module.word2vec.predicate_embeddings")
    embed_pred = np.concatenate((embed_pred, np.zeros(embed_pred[:1].shape)), axis=0)  # concatenate the negative-relation representation
    accum_results = None
    with tf.Session() as sess:
        if load_module_name is not None:
            # Restore variables from disk.
            if load_module_name=="gpi_linguistic_pretrained":
                module_path = os.path.join(filesmanager.get_file_path("data.visual_genome.data"), "data")
            else:
                module_path = filesmanager.get_file_path("sg_module.train.saver")
            module_path_load = os.path.join(module_path, load_module_name + "_module.ckpt")
            if os.path.exists(module_path_load + ".index"):
                saver.restore(sess, module_path_load)
                logger.log("Model restored.")
            else:
                raise Exception("Module not found")
        else:
            sess.run(init)
        # eval module

        nof = 0
        total = 0
        correct_all = 0
        total_all = 0

        # create one hot vector for null relation
        relation_neg = np.zeros(NOF_PREDICATES)
        relation_neg[NOF_PREDICATES - 1] = 1
        
        index = 0
        baseline_path = filesmanager.get_file_path("data.visual_genome.test_baseline")
        for file_name in test_files_list:
            file_path = os.path.join(entities_path, str(file_name) + ".p")
            file_handle = open(file_path, "rb")
            test_entities = cPickle.load(file_handle)
            file_handle.close()

            for entity in test_entities:
                file_path = os.path.join(baseline_path, str(entity.image.id) + ".p")
                if not os.path.exists(file_path):
                    continue
                file_handle = open(file_path, "rb")
                detector_data = cPickle.load(file_handle)
                file_handle.close()

                entity.predicates_outputs_with_no_activation = detector_data["rel_dist_mapped"]
                entity.objects_outputs_with_no_activations = detector_data["obj_dist_mapped"]
                # set diagonal to be negative relation
                N = entity.predicates_outputs_with_no_activation.shape[0]
                indices = np.arange(N)
                entity.predicates_outputs_with_no_activation[indices, indices, :] = relation_neg
                entity.predicates_labels[indices, indices, :] = relation_neg

                # create bounding box info per object
                obj_bb = np.zeros((len(entity.objects), 14))
                for obj_id in range(len(entity.objects)):
                    obj_bb[obj_id][0] = entity.objects[obj_id].x / 1200.0
                    obj_bb[obj_id][1] = entity.objects[obj_id].y / 1200.0
                    obj_bb[obj_id][2] = (entity.objects[obj_id].x + entity.objects[obj_id].width) / 1200.0
                    obj_bb[obj_id][3] = (entity.objects[obj_id].y + entity.objects[obj_id].height) / 1200.0
                    obj_bb[obj_id][4] = entity.objects[obj_id].x
                    obj_bb[obj_id][5] = -1 * entity.objects[obj_id].x
                    obj_bb[obj_id][6] = entity.objects[obj_id].y
                    obj_bb[obj_id][7] = -1 * entity.objects[obj_id].y 
                    obj_bb[obj_id][8] = entity.objects[obj_id].width * entity.objects[obj_id].height
                    obj_bb[obj_id][9] = -1 * entity.objects[obj_id].width * entity.objects[obj_id].height                     
                obj_bb[:, 4] = np.argsort(obj_bb[:, 4])
                obj_bb[:, 5] = np.argsort(obj_bb[:, 5])
                obj_bb[:, 6] = np.argsort(obj_bb[:, 6])
                obj_bb[:, 7] = np.argsort(obj_bb[:, 7])
                obj_bb[:, 8] = np.argsort(obj_bb[:, 8])
                obj_bb[:, 9] = np.argsort(obj_bb[:, 9])
                obj_bb[:, 10] = np.argsort(np.max(entity.objects_outputs_with_no_activations, axis=1))
                obj_bb[:, 11] = np.argsort(-1 * np.max(entity.objects_outputs_with_no_activations, axis=1))
                obj_bb[:, 12] = np.arange(obj_bb.shape[0])
                obj_bb[:, 13] = np.arange(obj_bb.shape[0], 0, -1)

                # filter images with no positive relations
                relations_neg_labels = entity.predicates_labels[:, :, NOF_PREDICATES - 1:]
                if np.sum(entity.predicates_labels[:, :, :NOF_PREDICATES - 1]) == 0:
                    continue

                # use the baseline detector's entity outputs as the input entity confidence
                in_entity_confidence = entity.objects_outputs_with_no_activations

                # create the feed dictionary
                feed_dict = {confidence_relation_ph: entity.predicates_outputs_with_no_activation,
                             confidence_entity_ph: in_entity_confidence,
                             bb_ph: obj_bb,
                             module.phase_ph: False,
                             word_embed_entities_ph: embed_obj, word_embed_relations_ph: embed_pred}

                out_relation_probes_val, out_entity_probes_val = \
                    sess.run([out_relation_probes, out_entity_probes],
                             feed_dict=feed_dict)

                out_relation_probes_val[indices, indices, :] = relation_neg

                results = test(entity.predicates_labels, entity.objects_labels, out_relation_probes_val,
                               out_entity_probes_val)

                # accumulate results
                if accum_results is None:
                    accum_results = results
                else:
                    for key in results:
                        accum_results[key] += results[key]

                # eval image
                k_metric_res, correct_image, total_image, img_per_relation_correct, img_per_relation_total = eval_image(entity,
                    entity.predicates_labels,
                    entity.objects_labels, out_relation_probes_val, out_entity_probes_val, k=min(k, N * N - N))
                # filter images without positive relations
                if total_image == 0:
                    continue

                nof += 1
                total += k_metric_res
                total_score = float(total) / nof
                correct_all += correct_image
                total_all += total_image
                logger.log("\rresult %d - %f (%d / %d) - total %f (%d)" % (
                    index, k_metric_res, correct_image, total_image, total_score, entity.image.id))

                index += 1

            entity_accuracy = float(accum_results['entity_correct']) / accum_results['entity_total']
            relation_pos_accuracy = float(accum_results['relations_pos_correct']) / accum_results[
                'relations_pos_total']
            relationships_pos_accuracy = float(accum_results['relationships_pos_correct']) / accum_results[
                'relations_pos_total']
            logger.log("entity %f - positive relation %f - positive triplet %f" %
                       (entity_accuracy, relation_pos_accuracy, relationships_pos_accuracy))

            time.sleep(3)


        logger.log("(%s) Final Result for k=%d - %f" % (load_module_name, k, total_score))