def partition(self, tree_level, indexing):
        log("aggregating at tree level {}...".format(tree_level))
        self.db.sql_update(self.aggregating_sql)

        if indexing:
            level_index_table_name = "{}_l{}".format(self.index_table_name,
                                                     tree_level)

            # Store the aggregated meta info in separate tables, one for each tree level
            log("storing indexing level in table '{}'...".format(
                level_index_table_name))
            self.db.sql_update(
                "DROP TABLE IF EXISTS {SR}.{level_index_table_name};"
                "CREATE TABLE {SR}.{level_index_table_name} AS "
                "SELECT * FROM {SR}.centroids;"
                "CREATE INDEX ON {SR}.{level_index_table_name} USING btree (cid)"
                .format(SR=self.sr_schema,
                        level_index_table_name=level_index_table_name))

        log("analyzing data table...")
        self.db.sql_update("ANALYZE {D}".format(D=self.data_table_name))

        # Partition only clusters whose size is above the threshold
        log("partitioning at tree level {}...".format(tree_level))
        print()
        print(self.partitioning_sql.format(tree_level=tree_level))
        new_cids_n = self.db.sql_update(
            self.partitioning_sql.format(tree_level=tree_level))

        log("new partitions: {}...".format(new_cids_n))

        self.db.commit()

        return new_cids_n
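    # Usage note (hedged; the names are hypothetical): with index_table_name
    # "sr_index", a call like partition(tree_level=2, indexing=True) snapshots the
    # current centroids into "sr_index_l2", re-partitions clusters above the size
    # threshold, and returns the number of newly created partitions.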
    def _materialize_seqbased(self, package, attrs):
        """
        :param package: A SequenceBasedPackage instance
        :param attrs: A collection of attributes
        """
        assert isinstance(package, SequenceBasedPackage)

        if attrs is None:
            use_attrs = {"id"} | self._package_query.get_attributes()
        elif attrs == "*":
            use_attrs = self.data_attributes
        else:
            use_attrs = {"id"}

        self.delete()

        log("materializing package table (seqbased)...")
        self._db.sql_update(
            "INSERT INTO {candidate_table} ({attrs}) \n"
            "SELECT {attrs} FROM (\n\t"
            "	SELECT {attrs}, ROW_NUMBER() OVER (ORDER BY id) AS seq \n\t"
            "	FROM {schema_name}.{core_table}) AS R \n"
            "WHERE seq IN %s".format(candidate_table=self.table_name,
                                     schema_name=self.coretable.schema_name,
                                     core_table=self.coretable.table_name,
                                     attrs=", ".join(use_attrs)),
            tuple(package.combination))
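        # Note (hedged, assuming a psycopg2-style driver behind sql_update): the
        # trailing tuple is adapted into an SQL list, e.g. package.combination ==
        # (3, 17, 42) renders the filter as "WHERE seq IN (3, 17, 42)", so only
        # the rows whose sequence numbers were selected are copied over.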
Example #3
 def execute(self, img: Tensor) -> Tensor:
     """
     Dummy filter: does nothing. Only meant to trigger the use of precomputed
     images, since that code is written in MATLAB.
     """
     log('Dummy code executed. No filter applied', logging.ERROR)
     return img
    def _materialize_list(self, package, attrs):
        """
        :param package: A list of Tuple objects (or an InMemoryPackage)
        :param attrs: A collection of attributes
        """
        assert isinstance(package, list) or isinstance(package,
                                                       InMemoryPackage)

        if attrs is None:
            if isinstance(package, InMemoryPackage):
                use_attrs = package.attrs
            else:
                use_attrs = {"id"} | self._package_query.get_attributes()
        elif attrs == "*":
            use_attrs = self.data_attributes
        else:
            use_attrs = {"id"}

        self.delete()

        log("list: creating package table...")
        for t in package:
            assert isinstance(t, Tuple)
            self._db.sql_update(
                "INSERT INTO {candidate_table} ({attrs}) VALUES ({vals})".
                format(candidate_table=self.table_name,
                       attrs=", ".join("{}".format(a) for a in use_attrs),
                       vals=", ".join("{}".format(getattr(t, a))
                                      for a in use_attrs)))
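        # Note (hedged): the interpolation above assumes attribute values that are
        # safe to embed literally. A safer variant would bind the values as query
        # parameters, as the sequence-based materializer above already does, e.g.:
        #   self._db.sql_update(
        #       "INSERT INTO {t} ({attrs}) VALUES ({ph})".format(
        #           t=self.table_name,
        #           attrs=", ".join(use_attrs),
        #           ph=", ".join(["%s"] * len(use_attrs))),
        #       tuple(getattr(t, a) for a in use_attrs))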
Example #5
    def to_candidate_package(self):
        """
        Convert a complete (or partial) solution into a Package instance
        containing the original tuples selected by the (partial) solution
        """
        log("Identifying candidate id's...")
        candidate_ids = []
        for cid, sols in self.sol["O"].iteritems():
            for t, sol in sols:
                # If it says 1, include the id
                if round(sol) == 1:
                    candidate_ids.append(t.id)

                # Otherwise, just skip this tuple
                elif round(sol) == 0:
                    pass
                else:
                    raise Exception(
                        "Solution value unexpected (neither 0 or 1): {}".
                        format(sol))

        # Construct the Package from the tuple ids
        log("Creating SequenceBasedPackage from candidate id's...")
        candidate = SequenceBasedPackage.from_ids(self.reduced_space,
                                                  candidate_ids)
        return candidate
    def __init__(self, *args):
        super(SimpleTableDataModel, self).__init__(*args)

        print "Initing Simple Candidate Table..."

        self.table_name = "simple_candidate_package_{}".format(
            SimpleTableDataModel.candidate_table_counter)
        SimpleTableDataModel.candidate_table_counter += 1

        self._db.sql_update("CREATE TEMP TABLE {candidate_table} AS \n"
                            "SELECT {attrs} \n"
                            "FROM {S}.{D} C \n"
                            "WITH NO DATA".format(
                                candidate_table=self.table_name,
                                attrs="*",
                                S=self.coretable.schema_name,
                                D=self.coretable.table_name))

        self.data_attributes = sorted(
            set(c.column_name for c in self._db.sql_query(
                "SELECT column_name "
                "FROM information_schema.columns "
                "WHERE table_name=%s ", self.table_name)))

        log("Simple Candidate Table inited.")
Example #7
def pretrain(model, state_dict, fc_set=True):
    own_state = model.state_dict()

    for name, param in state_dict.items():
        real_name = name.replace('module.', '')

        # break after adding cnn layer weights
        if "fc_live" in real_name:
            break

        if real_name in own_state:
            if isinstance(param, torch.nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            try:
                own_state[real_name].copy_(param)
            except Exception as e:
                log(
                    f'While copying the parameter named {real_name}, '
                    f'whose dimensions in the model are '
                    f'{own_state[real_name].size()} and '
                    f'whose dimensions in the checkpoint are {param.size()}.\n'
                    f'ErrMsg: {e}', logging.ERROR)
                log("But don't worry about it. Continue pretraining.")

    # set weights of fc_live
    if fc_set:
        num_feats = model.fc_live.in_features
        fc2_w = random_weight((num_feats, 2))
        fc2_b = zero_weight((2, ))

        model.fc_live = nn.Linear(num_feats, 2)
        model.fc_live.weight = nn.Parameter(fc2_w)
        model.fc_live.bias = nn.Parameter(fc2_b)
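# Hedged usage sketch for pretrain(): the checkpoint path, the 'state_dict' key,
# and build_model() are placeholders/assumptions, not part of the snippet above.
import torch

checkpoint = torch.load('checkpoints/backbone.pth', map_location='cpu')  # hypothetical path
state_dict = checkpoint.get('state_dict', checkpoint)  # accept either checkpoint layout
model = build_model()  # assumed repo-specific factory for a network with an fc_live head
pretrain(model, state_dict, fc_set=True)  # copies matching weights, re-inits fc_live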
Example #8
    def handle_postback(self, postback_payload, user_id):
        """
        Handles the following feedback:
        1. Moderation
        2. Rating of answer
        3. Add Tags
        4. Remove Tags

        :param postback_payload: The postback payload provided with the postback event.
        :param user_id: The unique user id of the user who will receive the feedback
        :return: feedback for the user
        """
        data = json.loads(postback_payload)
        if data['intent'] == 'moderation':
            self.moderation_update(data['entities']['tag'][0]['value'])
            response = "Thanks"
        elif data['intent'] == 'rating':
            response = self.answer_rating_update(
                data['entities']['tag'][0]['value'])
        elif data['intent'] == 'tag_add':
            response = self.user_handler.add_tag(
                data['entities']['tag'][0]['value'], user_id)
        elif data['intent'] == 'tag_remove':
            response = self.user_handler.remove_tag(
                data['entities']['tag'][0]['value'], user_id)
        else:
            log("Invalid Postback Intent -> {0}".format(data['intent']))
            response = None

        return response
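    # Example of the payload shape this handler expects (values are hypothetical):
    #   postback_payload = json.dumps({
    #       "intent": "tag_add",
    #       "entities": {"tag": [{"value": "python"}]}
    #   })
    #   response = handler.handle_postback(postback_payload, user_id="1234567890")
    # For the 'rating' intent, the tag value carries the comma-separated payload
    # consumed by answer_rating_update() further below.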
Example #9
 def set_lr(self, lr):
     current_lr = keras.backend.eval(self.model.optimizer.lr)
     log.log('set learning rate from %f to %f' % (current_lr, lr),
             log.LogType.WARNING)
     self.learning_rate = lr
     keras.backend.set_value(self.model.optimizer.lr, self.learning_rate)
     keras.backend.set_value(self.target_model.optimizer.lr,
                             self.learning_rate)
Example #10
 def reduce_lr(self):
     new_learning_rate = self.learning_rate * self.learning_decay
     log.log('reduce learning rate from %f to %f' %
             (self.learning_rate, new_learning_rate))
     self.learning_rate = new_learning_rate
     keras.backend.set_value(self.model.optimizer.lr, self.learning_rate)
     keras.backend.set_value(self.target_model.optimizer.lr,
                             self.learning_rate)
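# Hedged standalone sketch of the backend pattern used by set_lr()/reduce_lr()
# above, assuming the same (older) Keras API as the snippet; the tiny model is a
# placeholder built only so the example is self-contained.
import keras

demo_model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(4,))])
demo_model.compile(optimizer='adam', loss='mse')
current_lr = keras.backend.eval(demo_model.optimizer.lr)            # read current value
keras.backend.set_value(demo_model.optimizer.lr, current_lr * 0.5)  # halve it in place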
 def __init__(self, name, *, should_log_end=False, start_log_level=None):
     self.start_time = datetime.now()
     self.name = name
     self.end_time = None
     self.should_log_end = should_log_end
     self.run_msg = "Still Running"
     log(f"{self.name} stop watch start time: {self.start_time}",
         start_log_level)
Example #12
 def store_linear_problem_string(self, dirpath, filename=None):
     if filename is None:
         lp_file_name = "{}/lpsearch-cplex-{}.lp".format(
             dirpath, self.n_solver_solve_calls)
     else:
         lp_file_name = "{}/{}".format(dirpath, filename)
     self.problem.write(lp_file_name)
     log("Problem stored in file '{}'".format(lp_file_name))
     return lp_file_name
def test_file(filename):
    try:
        # Load image
        img = cv2.imread(filename)
        err_count_before = ErrorsStats.COUNT
        perform_crop(img, filename)
        return err_count_before == ErrorsStats.COUNT
    except Exception as e:
        log(f'Error testing Bounding Box of "{filename}". Msg: "{e}"')
        return False
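# Hypothetical usage of test_file(): scan a folder and count files whose
# bounding-box crop fails (the glob pattern below is an assumption).
import glob

failed = [p for p in glob.glob('data/images/*.png') if not test_file(p)]
log(f'{len(failed)} of the scanned files failed the bounding-box test')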
Example #14
 def load_model(self, model_path):
     try:
         self.model = keras.models.load_model(model_path)
         self.target_model = self.create_model()
         #self.target_model = keras.models.load_model(model_path)
         self.set_lr(self.learning_rate)
     except Exception as e:
         log.log('cannot load model from %s' % model_path,
                 log.LogType.ERROR)
         log.log(str(e), log.LogType.ERROR)
         self.init_model()
Example #15
 def load_model(self, model_path):
     try:
         self.target_model = self.create_model()
         self.target_model.build(input_shape=(None, 1, max_ops_len))
         self.target_model.load_weights(model_path)
         self.model = self.target_model
     except Exception as e:
         log.log('cannot load model from %s' % model_path,
                 log.LogType.ERROR)
         log.log(str(e), log.LogType.ERROR)
         self.init_model()
def finalize():
    """
    Closes up the run:
        - Stops the main timer
        - Marks the log end
        - Checks if there were errors and informs the user
    :return:
    """
    _stopwatch_main.end()
    log('\n<<<<<<<<<<<<<<<<<<<<<< COMPLETED >>>>>>>>>>>>>>>>>>>>>>')
    check_error()
Example #17
    def add_answer(self, answer, responder_id):
        """
        Adds the answer to the database.

        :param answer: The answer sent by the responder
        :param responder_id: The unique facebook id of the responder of the answer.
        :return: Feedback response for answer message.
        """
        m = re.search(r'\[([0-9]*)\]', answer)
        try:
            question_id = m.group(1)
        except Exception as ex:
            log("Exception occurred -> {0}".format(ex))
            question_id = None
Example #18
 def mark_question_as_resolved(self, question_id):
     """
     If the user adds OOW to a question's answer, then the question is marked as resolved.
     The user will not get any further answers for that question.
     :param question_id: The question's unique id.
     :return: None
     """
     try:
         self.cur.execute(
             "UPDATE question SET has_answer=TRUE WHERE question_id=%s",
             (question_id,)
         )
     except Exception as ex:
         log("question resolution db update failed. Exception thrown: {0}".format(ex))
Example #19
    def learningCurve(self, n_from=1, n_to=4, n_steps=1):
        log("Begining learning curve")
        self.reid_network.load_weights("weights/feature.hdf5")
        # if self.pretrain:
        #     self.train(flag="feature")
        # self.reid_network.save_weights("weights/feature.hdf5")
        Wsave = self.reid_network.get_weights()

        plot_loss = []
        plot_val_loss = []

        generator_val = ReidGenerator(database=self.dataset,
                                      batch_size=self.batch_size * 10,
                                      flag="validation")
        batch_val = next(generator_val)

        for n_examples in range(n_from, n_to, n_steps):
            log("\tTraining with {} batchs".format(n_examples))
            self.reid_network.set_weights(Wsave)

            # Trains on n examples
            batch_train = next(
                ReidGenerator(database=self.dataset,
                              batch_size=n_examples,
                              flag="train"))
            self.reid_network.fit(x=batch_train[0],
                                  y=batch_train[1],
                                  batch_size=self.batch_size,
                                  epochs=3,
                                  verbose=1)

            # Evaluate on these examples
            rslt = self.reid_network.evaluate(x=batch_train[0],
                                              y=batch_train[1],
                                              batch_size=self.batch_size,
                                              verbose=1)
            plot_loss.append(rslt[1])

            # Evaluate on all CV set
            rslt = self.reid_network.evaluate(x=batch_val[0],
                                              y=batch_val[1],
                                              batch_size=self.batch_size,
                                              verbose=1)
            plot_val_loss.append(rslt[1])

        print(*plot_loss, sep="; ")
        print(*plot_val_loss, sep="; ")
        plot.learningCurve(plot_loss, plot_val_loss)
        plot.showPlot()
    def get_reduced_space_representatives_from_clusters(self, cids):
        if len(cids) == 0:
            return []

        partitioning_attrs = ["cid"] + self.search.attrs

        res = []
        log("Loading representatives...")
        for r in self.search.db.sql_query("SELECT * FROM {SR}.{R}".format(
                SR=self.search.sr_schema, R=self.search.repr_table_name)):

            if r.cid in cids:
                res.append(Repr(attrs=partitioning_attrs, record=r))

        return res
    def __init__(self,
                 db,
                 dataset_size,
                 nbits,
                 cid_type_cast,
                 data_table_name,
                 repr_table_name,
                 clust_attrs,
                 data_attrs,
                 max_clust_size,
                 min_n_clusters,
                 epsilon,
                 index_table_name,
                 indexing_attrs,
                 sr_schema,
                 obj_type=None):
        self.sr_schema = sr_schema
        self.labels_ = None
        self.N = dataset_size
        self.nbits = nbits
        self.db = db
        self.data_table_name = data_table_name
        self.repr_table_name = repr_table_name
        self.clust_attrs = clust_attrs
        self.data_attrs = data_attrs
        self.global_depth = 0
        self.max_clust_size = max_clust_size
        self.min_n_clusters = min_n_clusters
        self.epsilon = epsilon
        self.partitioned_cids = set()
        self.index_table_name = index_table_name
        self.obj_type = obj_type

        if self.epsilon is not None:
            assert self.obj_type is not None

        self.indexing_attrs = indexing_attrs

        self.clust_attrs_mask = "".join(
            "1" if attr in self.clust_attrs else "0"
            for attr in self.indexing_attrs)
        log("Clust attrs mask: {}".format(self.clust_attrs_mask))

        self.partitioning_sql = None  # Will be set later, in fit()
        self.aggregating_sql = None  # Will be set later, in fit()

        self.cid_type_cast = cid_type_cast
        self.mask_type_cast = "BIT({})".format(self.nbits)
Example #22
def handle_message(payload, sender_id, message_type="non-feedback"):
    """
    Handles payload, and redirect them to proper handler.

    :param payload: Message (or Quick Reply) payload
    :param sender_id: The unique facebook user id of the person who sent the payload
    :param message_type: The type of the message: "non-feedback" -> feedback not required from another user,
                         "feedback" -> feedback required from another user.
    :return: None
    """
    status = handle.database().user().add_user_if_new(
        sender_id)  # always add the user to the database, if new.
    # status is not OK when the user is barred.
    if status != "OK":
        send_message(sender_id, status)
        return
    if message_type == "non-feedback":
        if payload.startswith('['):
            response_text = handle.database().answer().add_answer(
                payload, sender_id)
        elif payload.lower() == "done":
            cur = handle.database().db.get_cursor()
            cur.execute("SELECT answering_buffer FROM users WHERE user_id=%s",
                        (sender_id, ))
            answer = cur.fetchone()[0]
            if answer == '':
                return
            response_text = handle.event().database().check_for_moderation(
                answer, sender_id, handle)
            if response_text == "OK":
                response_text = handle.database().answer().add_answer(
                    payload, sender_id)
        elif handle.database().user().get_current_answering_question(
                sender_id) is not None:
            response_text = handle.database().answer().add_answer(
                payload, sender_id)
        else:
            response_text = handle.wit().brain().get_response(
                sender_id, payload)
    elif message_type == "feedback":
        response_text = handle.event().postback().handle_postback(
            payload, sender_id)
    else:
        response_text = ""
        log("invalid message type sent to handler")

    send_message(sender_id, response_text)
Example #23
def check_accuracy(loader, model):
    """
    Checks accuracy on all the data from the data loader

    :param loader: the loader (Expected to provide 1 batch of the full data)
    :param model: The model to be tested
    :return: Accuracy, Scores, y_test (the ground-truth labels), total network time
    """
    num_correct = 0
    num_samples = 0
    model.eval()  # set model to evaluation mode
    scores_all = None
    y_test = None
    total_time_network = 0
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=get_torch_device(), dtype=Conf.Training.DTYPE)
            y = y.to(device=get_torch_device(), dtype=torch.long)

            # Accumulate scores
            start_time = time.time()
            scores_x = model(x)
            total_time_network += time.time() - start_time  # add time it takes to run evaluation

            if scores_all is None:
                scores_all = scores_x
            else:
                scores_all = torch.cat([scores_all, scores_x], dim=0)

            # Accumulate labels
            if y_test is None:
                y_test = y
            else:
                y_test = torch.cat([y_test, y], dim=0)
            _, preds = scores_x.max(1)
            # noinspection PyUnresolvedReferences
            num_correct += (preds == y).sum()
            num_samples += preds.size(0)

        acc = float(num_correct) / num_samples
        log('Got %d / %d correct (%.2f)' %
            (num_correct, num_samples, 100 * acc))
        return acc, scores_all, y_test, total_time_network
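# Hedged usage sketch for check_accuracy(): wrap a held-out set in a single-batch
# DataLoader as the docstring suggests. The tensors and the tiny model below are
# placeholders so the example is self-contained.
import torch
from torch.utils.data import DataLoader, TensorDataset

x_val = torch.randn(64, 3, 224, 224)               # placeholder validation inputs
y_val = torch.randint(0, 2, (64,))                 # placeholder binary labels
val_loader = DataLoader(TensorDataset(x_val, y_val), batch_size=len(x_val))
demo_model = torch.nn.Sequential(torch.nn.Flatten(),
                                 torch.nn.Linear(3 * 224 * 224, 2))
acc, scores, y_true, net_time = check_accuracy(val_loader, demo_model)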
Example #24
def main():
    should_display_img = False
    should_save_img = True
    stop_after_desired = True
    desired_img_count = 200
    initialize()
    log("Main - Starting Test")
    log(f"Running in test mode. Going to show {desired_img_count} images")
    filters_as_str = None  # If left as None, the defaults will be used
    if filters_as_str is None:
        filters_as_str = Conf.RunParams.MODEL_TRAIN_DEFAULTS['filters']
    filters = strs_to_classes(filters_as_str)
    log(f'Going to test: {filters_as_str}')
    dataset_train = get_dataset(TDataUse.TRAIN, filters)
    dataset_test = get_dataset(TDataUse.TEST, filters)
    log(f'There are {len(dataset_train)} training files and  '
        f'{len(dataset_test)} testing files')
    count_live = count_spoof = 0
    img_count = 0
    if should_save_img:
        # Make output folders
        live_save_folder = f'{get_output_folder()}live/'
        spoof_save_folder = f'{get_output_folder()}spoof/'
        mkdir_w_par(live_save_folder)
        mkdir_w_par(spoof_save_folder)
    for img, label in dataset_train:
        if label == 1:
            count_spoof += 1
        else:
            count_live += 1
        if img_count < desired_img_count:
            img = transforms.ToPILImage()(img)
            if should_display_img:
                img.show()
            if should_save_img:
                img.save(
                    f'{live_save_folder if label == 0 else spoof_save_folder}'
                    f'{img_count}.png')
            img_count += 1
        elif stop_after_desired:
            break
    log(f'There are {count_live} live images for training and {count_spoof} '
        f'spoof images')

    finalize()
Example #25
    def add_question(self, question, asker_id):
        """
        Adds the question to the database

        :param question: The text of the question
        :param asker_id: The person who asked the question
        :return: None
        """
        is_question_stored = False
        try:
            self.cur.execute(
                "INSERT INTO question (question, asker_id, has_answer) VALUES (%s, %s, %s) RETURNING question_id;",
                (question, str(asker_id), False))
            question_id = self.cur.fetchone()[0]
            is_question_stored = True
            DatabaseEvent.new_question(question_id, question, asker_id)
        except Exception as ex:
            log("Failed to insert question in the database. Exception thrown: {0}".format(ex))
Example #26
    def new_answer(self, question_id, answer, sender_id):
        """
        Triggered when a new answer is given.
        Response: If has_answer = True, do nothing (as the user is satisfied);
                  else, send the asker the answer and ask whether they are satisfied.

        :param question_id: The unique id of the question that was answered.
        :param answer: Answer Text
        :param sender_id: The user id of the person who answered the question.
        :return: None
        """
        try:
            self.cur.execute(
                "SELECT asker_id, question, has_answer FROM question WHERE question_id=%s;",
                (question_id, ))
            asker_id, question, has_answer = self.cur.fetchone()
            self.cur.execute(
                "UPDATE users SET answered_questions = array_append(answered_questions, %s)"
                " WHERE user_id=%s AND NOT answered_questions @> ARRAY[%s::INT];",
                (question_id, sender_id, question_id))
            self.cur.execute(
                "UPDATE users SET answering_questions = array_remove(answering_questions, %s)"
                " WHERE user_id=%s;", (question_id, sender_id))

            if has_answer:
                return
            if asker_id == sender_id:
                return

            try:
                respond_message = "Here is the answer to your question\nQuestion:{0}".format(
                    question)
                answer_message = "Answer:\n {0}".format(answer)
                send_message(int(asker_id), respond_message)
                send_message(int(asker_id), answer_message)
                get_answer_feedback(int(asker_id), int(sender_id), question_id,
                                    question)
            except Exception as ex:
                log("Failed sending answer to asker. Exception:{0}".format(ex))
        except Exception as ex:
            log("Failed to fetch the answered question from the database. Exception:{0}"
                .format(ex))
Example #27
    def new_question(self, question_id, question, asker_id):
        """
        Triggered when a new question is asked.
        Response: If the number of users = 1, do nothing
                                          = 2, send the question to the non-asker
                                          > 2, send the question to n/2 non-askers, where n = number of non-askers

        :param question_id: The unique id of the question that was posted.
        :param question: The question asked,
        :param asker_id: The unique facebook id of the person asking the question.
        :return: None
        """
        try:
            self.cur.execute(
                "SELECT user_id FROM users WHERE user_id <> %s AND subscription=TRUE;",
                (asker_id, ))
            non_askers = [x[0] for x in self.cur.fetchall()]
            if not non_askers:
                return
            request_message = "Can you answer this question? coz I can't as I don't have brains "
            question_message = "[Question][qid:{0}] {1}".format(
                question_id, question)
            if len(non_askers) == 1:
                send_message(int(non_askers[0]), request_message)
                send_message(int(non_askers[0]), question_message)
                self.cur.execute(
                    "UPDATE users SET sent_questions = array_append(sent_questions, %s)"
                    " WHERE user_id=%s AND NOT sent_questions @> ARRAY[%s];",
                    (question_id, non_askers[0], question_id))
            else:
                chosen_non_askers = random.sample(
                    non_askers, min(4, len(non_askers) // 2))
                for recipient in chosen_non_askers:
                    send_message(int(recipient), request_message)
                    send_message(int(recipient), question_message)
                    self.cur.execute(
                        "UPDATE users SET sent_questions = array_append(sent_questions, %s)"
                        " WHERE user_id=%s AND NOT sent_questions @> ARRAY[%s];",
                        (question_id, recipient, question_id))
        except Exception as ex:
            log("Failed to fetch all non askers. Exception: {0}".format(ex))
Example #28
def webhook():
    """
    Triggered on each GET and POST request to the webhook endpoint.

    :return: Status code acknowledging the GET or POST request
    """
    if request.method == 'POST':
        data = request.get_json(force=True)
        log(json.dumps(data))  # you may not want to log every incoming message in production, but it's good for testing

        if data["object"] == "page":
            for entry in data["entry"]:
                for event in entry["messaging"]:
                    sender_id = event["sender"]["id"]

                    if 'message' in event and 'text' in event['message']:
                        message_text = event["message"]["text"]
                        if event.get("message").get("quick_reply"):
                            feedback_payload = event["message"]["quick_reply"]["payload"]
                            handle_message(feedback_payload, sender_id, message_type="feedback")
                        else:
                            handle_message(message_text, sender_id)

                    if 'postback' in event and 'payload' in event['postback']:
                        postback_payload = event['postback']['payload']
                        log(postback_payload)
                        handle_message(postback_payload, sender_id, message_type="feedback")

                    if event.get("delivery"):
                        pass

                    if event.get("optin"):
                        pass

        return "ok", 200

    elif request.method == 'GET':  # Verification
        if request.args.get("hub.verify_token") == VERIFY_TOKEN:
            return request.args.get('hub.challenge'), 200
        else:
            return 'Error, wrong validation token', 403
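# Assumed Flask wiring for the handler above (not shown in the snippet): the
# route, app module, and port are hypothetical.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/webhook', 'webhook', webhook, methods=['GET', 'POST'])

if __name__ == '__main__':
    app.run(port=5000)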
def clean_list(in_filename, out_filename_base, base_in_folder):
    # Read input file
    with open(in_filename, 'r') as f:
        in_lines = f.readlines()
    log(f'{len(in_lines)} lines read from "{in_filename}"')

    # Prepare lists to store results
    good = []
    bad = []

    # Walk through the input and separate good files from bad ones
    for line in in_lines:
        filename = get_filename(line)
        if test_file(base_in_folder + filename):
            good.append(line)
        else:
            bad.append(line)

    # Save output files
    save_list_to_file(good, out_filename_base + 'good.txt')
    save_list_to_file(bad, out_filename_base + 'bad.txt')
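# Hypothetical invocation of clean_list(): the list file and folders below are
# placeholders; results land in <out_filename_base>good.txt and <out_filename_base>bad.txt.
clean_list('lists/train_list.txt', 'output/train_', '/data/images/')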
Example #30
 def answer_rating_update(self, payload):
     """
     Sends feedback to the answerer after updating their karma, when the asker rates the answer.
     :param payload: Answer rating payload (points, rating, answerer_id, question_id, question) in string format.
     :return:
     """
     points, rating, answerer_id, question_id, question = [
         x.strip() for x in payload.split(',')
     ]
     if rating == 'OOW':
         self.question_handler.mark_question_as_resolved(int(question_id))
     karma, is_karma_updated = self.user_handler.update_karma(
         int(points), answerer_id)
     try:
         if is_karma_updated:
             respond_message = answer_feedback_to_answerer(
                 rating, question, karma)
             send_message(int(answerer_id), respond_message)
     except Exception as ex:
         log("Failed sending feedback response to responder. Exception:{0}".
             format(ex))