def pixivSubmissionsFromJson(bookmarks):
    submissions = []
    for illustration in bookmarks.illusts:
        if illustration.type not in ['illust', 'manga']:
            # TODO: Add more format support
            logger.log("Skipping " + illustration.type)
            continue

        # Album
        if illustration.meta_pages:
            imageIndex = 0
            for imagePage in illustration.meta_pages:
                newSubmission = Submission()
                fillPixivSubmission(illustration, newSubmission)

                newSubmission.title = '{}_{}'.format(newSubmission.title,
                                                     imageIndex)
                newSubmission.bodyUrl = imagePage.image_urls.original
                imageIndex += 1

                submissions.append(newSubmission)

        # Single image
        elif illustration.meta_single_page:
            newSubmission = Submission()
            fillPixivSubmission(illustration, newSubmission)

            # The image that will be downloaded
            newSubmission.bodyUrl = illustration.meta_single_page.original_image_url

            submissions.append(newSubmission)

    logger.log("Got {} Pixiv bookmarks".format(len(submissions)))
    return submissions
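A minimal driver sketch for the parser above, assuming the pixivpy3 AppPixivAPI client (the refresh token and user id below are placeholders; the original example only shows the parsing side):

from pixivpy3 import AppPixivAPI

aapi = AppPixivAPI()
aapi.auth(refresh_token='YOUR_REFRESH_TOKEN')  # placeholder credential
bookmarks = aapi.user_bookmarks_illust(user_id=12345)  # hypothetical user id
submissions = pixivSubmissionsFromJson(bookmarks)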
Example No. 2
    def __init__(self, reddit_settings={}):
        self.reddit = praw.Reddit(
            client_id='Vyw-20ZFtH4msA',
            client_secret='-vZkEG8s6qlRvbTcuGxmJOnpAds',
            user_agent='ubuntu:arguing-agents:v1 (by /u/HolzmindenScherfede)')

        if reddit_settings == {}:
            print(
                "Reddit: __init__: No settings given. Creating empty object.")
            return

        if 'mode' not in reddit_settings:
            print(
                "Reddit: __init__: Mode not provided. Initializing it to 'find'."
            )
            reddit_settings['mode'] = 'find'

        mode = reddit_settings['mode']

        if mode == 'find':
            if 'topic' in reddit_settings:
                topic = reddit_settings['topic']
            else:
                print("Reddit: provide topic")
                exit(-1003)

            if 'amount' in reddit_settings:
                amount = reddit_settings['amount']
            else:
                amount = 10

            if 'sortby' in reddit_settings:
                sortby = reddit_settings['sortby']
            else:
                sortby = 'hot'

            praw_submissions = list(
                self.reddit.subreddit('ChangeMyView').search(topic,
                                                             sortby))[:amount]

            self.submissions = []

            for praw_submission in praw_submissions:
                submission = Submission(praw_submission)
                self.submissions.append(submission)
        elif mode == 'url':
            if 'submission_urls' not in reddit_settings:
                print('Reddit: provide submission_urls!')
                exit(-12031)

            submission_urls = reddit_settings['submission_urls']

            self.submissions = []

            for submission_url in submission_urls:
                submission = Submission(
                    praw.models.Submission(self.reddit, url=submission_url))
                self.submissions.append(submission)
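A usage sketch for the constructor above (the topic, amount, and URL values are hypothetical): 'find' mode searches r/ChangeMyView for a topic, while 'url' mode wraps specific submissions.

reddit_find = Reddit({'mode': 'find', 'topic': 'nuclear energy',
                      'amount': 5, 'sortby': 'new'})
reddit_urls = Reddit({'mode': 'url',
                      'submission_urls': ['https://www.reddit.com/r/changemyview/comments/abc123/']})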
Example No. 3
    def from_dict(self, dic):
        self.submissions = []

        for dic_submission in dic['submissions']:
            submission = Submission()
            submission.from_dict(dic_submission, self.reddit)
            self.submissions.append(submission)
Example No. 4
    def evaluate(self) -> bool:
        """

        The function will error out if either the submission
        function raises an error or the solution and submission
        outputs do not match

        Thank God for stackoverflow
        """

        solution = Solution()
        submission = Submission()

        # Retrieve the functions under test from the Solution and Submission classes
        # The names of these functions are defined in inputs.json
        solution_func = getattr(solution, self.func_name)
        submission_func = getattr(submission, self.func_name)

        for testcase in self.inputs:
            # unpack each testcase and execute
            solution_res = solution_func(*testcase)
            submission_res = submission_func(*testcase)

            if isinstance(solution_res, Iterable):
                if sorted(solution_res) != sorted(submission_res):
                    raise ImplementationError(
                        f"Error for testcase {testcase}. "
                        f"Expected {solution_res}, "
                        f"got {submission_res}")
            else:
                if solution_res != submission_res:
                    raise ImplementationError(
                        f"Error for testcase {testcase}. "
                        f"Expected {solution_res}, "
                        f"got {submission_res}")
Example No. 5
 def load_data(self):
     """Initialize blocchain + open submissions data from a file."""
     try:
         with open('blocchain-{}.bit'.format(self.node_id), mode='r') as f:
             # file_content = pickle.loads(f.read())
             file_content = f.readlines()
             # blocchain = file_content['chain']
             # open_submissions = file_content['ot']
             blocchain = json.loads(file_content[0][:-1])
             # We need to convert  the loaded data because submissions
             # should use OrderedDict
             updated_blocchain = []
             for bloc in blocchain:
                 converted_tx = [Submission(
                     tx['voter'],
                     tx['candidate'],
                     tx['zero'],         #added to hold day zero, countdown until final vote
                     tx['signature'],
                     tx['amount']) for tx in bloc['submissions']]
                 updated_bloc = Bloc(
                     bloc['index'],
                     bloc['previous_hash'],
                     converted_tx,
                     bloc['proof'],
                     bloc['timestamp'])
                 updated_blocchain.append(updated_bloc)
             self.chain = updated_blocchain
             open_submissions = json.loads(file_content[1][:-1])
             # We need to convert  the loaded data because submissions
             # should use OrderedDict
             updated_submissions = []
             for tx in open_submissions:
                 updated_submission = Submission(
                     tx['voter'],
                     tx['candidate'],
                     tx['zero'],
                     tx['signature'],
                     tx['amount'])
                 updated_submissions.append(updated_submission)
             self.__open_submissions = updated_submissions
             peer_nodes = json.loads(file_content[2])
             self.__peer_nodes = set(peer_nodes)
     except (IOError, IndexError):
         pass
     finally:
         print('Cleanup!')
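The loader above expects 'blocchain-<node_id>.bit' to hold three JSON lines: the chain, the open submissions, and the peer nodes. A minimal save_data counterpart method sketch under that assumption (hypothetical; the original example only shows the read side):

def save_data(self):
    with open('blocchain-{}.bit'.format(self.node_id), mode='w') as f:
        saveable_chain = [{'index': b.index,
                           'previous_hash': b.previous_hash,
                           'submissions': [tx.__dict__ for tx in b.submissions],
                           'proof': b.proof,
                           'timestamp': b.timestamp} for b in self.chain]
        f.write(json.dumps(saveable_chain))
        f.write('\n')
        f.write(json.dumps([tx.__dict__ for tx in self.__open_submissions]))
        f.write('\n')
        f.write(json.dumps(list(self.__peer_nodes)))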
Example No. 6
def add_submission_by_user(user: User, subm: str, cat: str,
                           lang: str) -> Submission:
    submission = Submission(user=user,
                            value=subm,
                            category=cat,
                            language=lang,
                            mwe=mwe_helper.get_todays_mwe(lang))
    session.add(submission)
    session.commit()
    return submission
Example No. 7
def submit_handler_2(user: User, update: Update, context: CallbackContext):
    todays_mwe = mwe_helper.get_todays_mwe(user.language)
    submission = Submission(value=update.message.text)
    context.user_data["submission"] = submission
    context.user_data["state"] = "submit_example_type_1"
    update.message.reply_text(
        get_language_token(user.language, Tokens.SPECIAL_MEANING) %
        todays_mwe.name,
        parse_mode=telegram.ParseMode.MARKDOWN,
        reply_markup=get_submit_category_1_keyboard_markup(
            user.language, todays_mwe))
Example No. 8
def run(pid, title, lang, code):
    controller = Controller.controller
    try:
        submission = Submission(pid, title, lang, code)
        controller.execute(submission)
        return submission.sid
    except Failed as e:
        logging.warning(e.message)
        raise
    except Exception as e:
        traceback.print_exc()
        raise Failed('unknown error')
Example No. 9
    def handleSubmission(id, lang, code):
        # Maximum processing time of 5 seconds
        try:
            with tlim(5):
                logger.info(f'[PROCESSING] {multiprocessing.current_process().name}')

                # Get compiler
                c = Client.__compiler.getCompiler(lang)
                s = Submission(id, lang, code, c)
                s.start()

        except TimeoutException:
            return
Example No. 10
    def updateSolvedProblems(self, html):
        #TODO: compile
        submissionPattern = b'\t<tr class="sectiontableentry\d+">\n' \
                            b'\t<td align="center"><a href="[^"]*">(\d+)</a></td>\n' \
                            b'\t<td align="center">\d+</td>\n' \
                            b'\t<td align="center">\d+</td>\n' \
                            b'\t<td align="center">([^<]*)</td>\n' \
                            b'\t<td align="center">[^<]*</td>\n\t'
        tablePattern = b'<div class="contentheading">Solved problems</div>\n\t<table border="0" cellspacing="0" cellpadding="4" style="width:70%" align="center">\n\t<tr class="sectiontableheader"><th align="center">Problem</th><th align="center">Ranking</th><th align="center">Submission</th><th align="center">Date</th><th align="center">Run time</th></tr>\n\t' \
                       b'(' + submissionPattern + b')*' \
                                                  b'\t</table>'
        tableHTML = re.search(tablePattern, html).group(0)

        self.submissions = list()
        for match in re.finditer(submissionPattern, tableHTML):
            self.submissions.append(Submission(match.group(1).decode('utf-8'), match.group(2).decode('utf-8')))
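The TODO above asks for the patterns to be compiled; a self-contained sketch of the idea (the class name, method, and abbreviated pattern are hypothetical; the real patterns are the multi-line ones above):

import re

class SolvedProblemsParser:
    # Compiled once at class-definition time instead of on every call
    SUBMISSION_RE = re.compile(rb'<td align="center"><a href="[^"]*">(\d+)</a></td>')

    def solved_problem_ids(self, html: bytes):
        return [m.group(1).decode('utf-8')
                for m in self.SUBMISSION_RE.finditer(html)]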
Example No. 11
def import_old():
    existing_tags = {}
    for tag in Tag.query.all():
        existing_tags[tag.name] = tag
    for resource in [
            'Projector', 'Microphone', 'Sound system', 'Drinking water',
            'Quiet (no airwalls)'
    ]:
        penguicontrax.db.session.add(Resource(resource))
    for track in ['literature', 'tech', 'music', 'food', 'science']:
        penguicontrax.db.session.add(Track(track, None))
    with penguicontrax.app.open_resource('schedule2013.html', mode='r') as f:
        tree = ET.fromstring(f.read())
        events = tree.find('document')
        for section in events:
            if section.tag == 'div' and section.attrib['class'] == 'section':
                name = section[0].text
                tag_list = section[1].text  # Tag doesn't seem to be in the DB yet
                person = section[3][0].text
                # Only one presenter is supported so far
                firstPerson = person.split(',')[0].split(' ')
                description = section[3][0].tail
                submission = Submission()
                submission.email = '*****@*****.**'
                submission.title = name
                submission.description = description
                submission.duration = 1
                submission.setupTime = 0
                submission.repetition = 0
                submission.firstname = firstPerson[0]
                submission.lastname = firstPerson[1] if len(firstPerson) > 1 else ''
                submission.followUpState = 0
                submission.tags = []
                for tag in tag_list.split(','):
                    tag = normalize_tag_name(tag)
                    db_tag = None
                    if not tag in existing_tags:
                        db_tag = Tag(tag)
                        penguicontrax.db.session.add(db_tag)
                        existing_tags[tag] = db_tag
                    else:
                        db_tag = existing_tags[tag]
                    submission.tags.append(db_tag)
                penguicontrax.db.session.add(submission)
        penguicontrax.db.session.commit()
Example No. 12
 def resolve(self):
     """Checks all peer nodes' blocchains and replaces the local one with
     longer valid ones."""
     # Initialize the winner chain with the local chain
     winner_chain = self.chain
     replace = False
     for node in self.__peer_nodes:
         url = 'http://{}/chain'.format(node)
         try:
             # Send a request and store the response
             response = requests.get(url)
             # Retrieve the JSON data as a dictionary
             node_chain = response.json()
             # Convert the dictionary list to a list of bloc AND
             # submission objects
             node_chain = [
                 Bloc(bloc['index'],
                      bloc['previous_hash'],
                      [Submission(tx['voter'],
                                  tx['candidate'],
                                  tx['zero'],
                                  tx['signature'],
                                  tx['amount']) for tx in bloc['submissions']],
                      bloc['proof'],
                      bloc['timestamp']) for bloc in node_chain
             ]
             node_chain_length = len(node_chain)
             local_chain_length = len(winner_chain)
             # Store the received chain as the current winner chain if it's
             # longer AND valid
             if (node_chain_length > local_chain_length and
                     Verification.verify_chain(node_chain)):
                 winner_chain = node_chain
                 replace = True
         except requests.exceptions.ConnectionError:
             continue
     self.resolve_conflicts = False
     # Replace the local chain with the winner chain
     self.chain = winner_chain
     if replace:
         self.__open_submissions = []
     self.save_data()
     return replace
Example No. 13
    def add_submission(self,
                        candidate,
                        voter,
                        zero,
                        signature,
                        amount=1.0,
                        is_receiving=False):
        """ Append a new value as well as the last blocchain value to the blocchain.

        Arguments:
            :voter: The person voting.
            :candidate: The candidate recieving the votes.
            :amount: The amount of votes sent with the submission
            (default = 1.0)
        """
        # submission = {
        #     'voter': voter,
        #     'candidate': candidate,
        #     'amount': amount
        # }
        # if self.public_key == None:
        #     return False
        submission = Submission(voter, candidate, zero, signature, amount)
        if Verification.verify_submission(submission, self.get_balance):
            self.__open_submissions.append(submission)
            self.save_data()
            if not is_receiving:
                for node in self.__peer_nodes:
                    url = 'http://{}/broadcast-submission'.format(node)
                    try:
                        response = requests.post(url,
                                                 json={
                                                     'voter': voter,
                                                     'candidate': candidate,
                                                     'zero': zero,
                                                     'amount': amount,
                                                     'signature': signature
                                                 })
                        if (response.status_code == 400 or
                                response.status_code == 500):
                            print('Submission declined, needs resolving')
                            return False
                    except requests.exceptions.ConnectionError:
                        continue
            return True
        return False
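A sketch of the receiving end of the broadcast above (a hypothetical Flask route; the app and blocchain instance names are assumptions, since the original example only shows the sending side):

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/broadcast-submission', methods=['POST'])
def broadcast_submission():
    values = request.get_json()
    required = ['voter', 'candidate', 'zero', 'signature', 'amount']
    if not values or not all(key in values for key in required):
        return jsonify({'message': 'Some data is missing.'}), 400
    success = blocchain.add_submission(values['candidate'], values['voter'],
                                       values['zero'], values['signature'],
                                       values['amount'], is_receiving=True)
    if success:
        return jsonify({'message': 'Successfully added submission.'}), 201
    return jsonify({'message': 'Creating a submission failed.'}), 500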
Example No. 14
 def add_bloc(self, bloc):
     """Add a bloc which was received via broadcasting to the localb
     lockchain."""
     # Create a list of submission objects
     submissions = [Submission(
         tx['voter'],
         tx['candidate'],
         tx['zero'],
         tx['signature'],
         tx['amount']) for tx in bloc['submissions']]
     # Validate the proof of work of the bloc and store the result (True
     # or False) in a variable
     proof_is_valid = Verification.valid_proof(
         submissions[:-1], bloc['previous_hash'], bloc['proof'])
     # Check if previous_hash stored in the bloc is equal to the local
     # blocchain's last bloc's hash and store the result in a variable
     hashes_match = hash_bloc(self.chain[-1]) == bloc['previous_hash']
     if not proof_is_valid or not hashes_match:
         return False
     # Create a bloc object
     converted_bloc = Bloc(
         bloc['index'],
         bloc['previous_hash'],
         submissions,
         bloc['proof'],
         bloc['timestamp'])
     self.__chain.append(converted_bloc)
     stored_submissions = self.__open_submissions[:]
     # Check which open submissions were included in the received bloc
     # and remove them
     # This could be improved by giving each submission an ID that would
     # uniquely identify it
     for itx in bloc['submissions']:
         for opentx in stored_submissions:
             if (opentx.voter == itx['voter'] and
                     opentx.candidate == itx['candidate'] and
                     opentx.zero == itx['zero'] and
                     opentx.amount == itx['amount'] and
                     opentx.signature == itx['signature']):
                 try:
                     self.__open_submissions.remove(opentx)
                 except ValueError:
                     print('Item was already removed')
     self.save_data()
     return True
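hash_bloc is called above (and in mine_bloc below) but not shown; a typical sketch using the standard library (to_ordered_dict is a hypothetical Submission helper that returns its fields in a stable order):

import hashlib
import json

def hash_bloc(bloc):
    # Dict-ify the bloc, order the keys, and hash the resulting JSON string
    hashable_bloc = bloc.__dict__.copy()
    hashable_bloc['submissions'] = [tx.to_ordered_dict()
                                    for tx in hashable_bloc['submissions']]
    return hashlib.sha256(
        json.dumps(hashable_bloc, sort_keys=True).encode()).hexdigest()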
Example No. 15
 def criaSubmission(self, id):
     urlSubmission = "https://www.urionlinejudge.com.br/judge/pt/runs/code/" + id
     submissionPage = self.sess.get(urlSubmission)
     submissionSoup = BeautifulSoup(submissionPage.text,
                                    features="html.parser")
     dados = submissionSoup.find("div", {
         "class": "st-big-box"
     }).find_all("dd")
     idEnome = dados[0].text.split("-")
     resultado = dados[1].text.strip()
     language = dados[2].text.split("(")[0].strip()
     data = dados[5].text.strip()
     id = idEnome[0].strip()
     nome = idEnome[1].strip()
     codeLines = submissionSoup.find("pre", {"id": "code"})
     code = codeLines.text
     submission = Submission(id, nome, resultado, data, language, code)
     return submission
Example No. 16
    def massageData(self, inflate):
        try:
            submissions = inflate['attached_media']
            from submission import Submission

            contributors = []
            for s in submissions:
                submission = Submission(_id=s)
                print(submission.emit())

                contributors.append(
                    submission.j3m.genealogy['createdOnDevice'])

            inflate['contributors'] = list(set(contributors))

        except KeyError as e:
            print(e)

        return inflate
Example No. 17
    def submit(self, revision, data, mode_info, log={}):
        repo = mode_info["repo"]
        settings = {
            "treeherder": {
                'group_symbol': 'AWFY',
                'group_name': 'AWFY',
                'job_name': mode_info["job_name"],
                'job_symbol': mode_info["job_symbol"],
                "tier": mode_info["tier"],
                "platform": mode_info["platform"]
            }
        }

        logfile = create_log_item({
            "repo": repo,
            "revision": revision,
            "settings": settings,
            "perf_data": data,
            "extra_log_info": log
        })
        loglink = "https://arewefastyet.com/data.php?file=treeherder-logs/" + logfile

        retriggerlink = None
        if "mode_id" in log and "machine_id" in log:
            retriggerlink = "https://arewefastyet.com/retrigger/?machine_id=" + str(
                log["machine_id"]) + "&mode_id=" + str(
                    log["mode_id"]) + "&revision=" + revision

        th = Submission(repo,
                        revision,
                        treeherder_url=awfy.th_host,
                        treeherder_client_id=awfy.th_user,
                        treeherder_secret=awfy.th_secret,
                        settings=settings)

        job = th.create_job(None)
        th.submit_completed_job(job,
                                data,
                                loglink=loglink,
                                retriggerlink=retriggerlink)
Example No. 18
def import_old():
    existing_tags = {}
    for tag in Tag.query.all():
        existing_tags[tag.name] = tag
    with penguicontrax.app.open_resource('schedule2013.html', mode='r') as f:
        tree = ET.fromstring(f.read())
        events = tree.find('document')
        for section in events:
            if section.tag == 'div' and section.attrib['class'] == 'section':
                name = section[0].text
                tag_list = section[1].text # Tag doesn't seem to be in the DB yet
                person = section[3][0].text
                # Only one presenter is supported so far
                firstPerson = person.split(',')[0].split(' ')
                description = section[3][0].tail
                submission = Submission()
                submission.email = '*****@*****.**'
                submission.title = name
                submission.description = description
                submission.duration = 1
                submission.setupTime = 0
                submission.repetition = 0
                submission.firstname = firstPerson[0]
                submission.lastname = firstPerson[1] if len(firstPerson) > 1 else ''  
                submission.followUpState = 0
                submission.tags = []
                for tag in tag_list.split(','):
                    tag = tag.strip()
                    db_tag = None
                    if not tag in existing_tags:
                        db_tag = Tag(tag)
                        penguicontrax.db.session.add(db_tag)
                        existing_tags[tag] = db_tag
                    else:
                        db_tag = existing_tags[tag]
                    submission.tags.append(db_tag)
                penguicontrax.db.session.add(submission)
        penguicontrax.db.session.commit()
Example No. 19
        x_train = [row['UPDATE_TIME'] for row in data]
        y1_train = [row['BANDWIDTH_TOTAL'] for row in data]
        y2_train = [row['MAX_USER'] for row in data]
        dates_predict = list(week_range(start_date_predict, end_date_predict, weekday))

        model = Model()
        model.fit(x_train, y1_train, y2_train)
        y1_predict, y2_predict = model.predict(dates_predict)

        for date, y1, y2 in zip(dates_predict, y1_predict, y2_predict):
            result.add(date, hour, server, y1, y2)

    pbar.close()
    print()

    submission = Submission()

    print('Prepare submission...')
    with open('data/test_id.csv', 'r') as test_file:
        reader = csv.DictReader(test_file)
        for row in tqdm(reader):
            test_id = row['id']
            update_time = datetime.strptime(row['UPDATE_TIME'], '%Y-%m-%d').date()
            hour = int(row['HOUR_ID'])
            server = row['SERVER_NAME']
            bandwidth, max_user = result.get(update_time, hour, server)
            submission.add_entry(test_id, bandwidth, max_user)
    print()

    print('Start write submission...')
    submission.write()
Example No. 20
import numpy as np

# The dataset/pixel/submission helpers used below come from sibling modules;
# these import paths are assumptions, since the aggregated snippet omitted
# its other imports:
from dataset import TestSet, TrainSet
from pixel import Pixel
from submission import Submission

from mlp import MultiLayerPerceptron


TEST_PATH = "data/test.csv"
TRAIN_PATH = "data/train.csv"
test_set = TestSet(TEST_PATH).read()
train_set = TrainSet(TRAIN_PATH).read()
x_train = train_set.drop("label", axis=1).values.astype('float32')
y_train = train_set["label"].values.astype('int32')

#preprocessing
max_value = np.max(x_train)
mean_value = np.mean(x_train)
test_set = (test_set - mean_value) / max_value
x_train = (x_train - mean_value) / max_value


#MLP
MLP_SUBMISSION = "mlp.csv"
mlp = MultiLayerPerceptron(x_train, y_train)
mlp.fit()
predictions = mlp.predict(test_set)
Submission(predictions).save(MLP_SUBMISSION)


for i in range(1, 10):
    random_image = test_set[i,:]
    print(predictions[i])
    Pixel(random_image).display()

Example No. 21
def main():
    # Command line Interface
    parser = argparse.ArgumentParser()
    parser.add_argument('-d',
                        '--dirpath',
                        default=DATASETS_DIRPATH,
                        help="dataset directory path")
    parser.add_argument('-n',
                        '--n_train',
                        default=None,
                        type=int,
                        help="number of rows to download on the train dataset")
    parser.add_argument('-t',
                        '--n_test',
                        default=None,
                        type=int,
                        help="number of rows to download on the test dataset")
    parser.add_argument('-e',
                        "--epochs",
                        default=3,
                        type=int,
                        help="set the number of epochs")
    parser.add_argument('-b',
                        "--batch_size",
                        default=86,
                        type=int,
                        help="set batch size")
    cli = parser.parse_args()

    # Download and clean train dataset
    train = DataSet(dirpath=cli.dirpath, filename="train.csv")
    train.download(nrows=cli.n_train)
    train.split_X_Y()
    train.normalize()
    train.reshape()
    train.convert_digits_to_one_hot_vectors()
    print(train)

    # Split trian/validation datasets
    validation = train.extract_validation(size=0.1)
    print(validation)

    # Download clean test dataset
    test = DataSet(dirpath=cli.dirpath, filename="test.csv")
    test.download(nrows=cli.n_test)
    test.set_X()
    test.normalize()
    test.reshape()
    print(test)

    # Setup convolutional neural network model
    model = Sequential()

    model.add(
        Conv2D(filters=32,
               kernel_size=(5, 5),
               padding='Same',
               activation='relu',
               input_shape=(28, 28, 1)))
    model.add(
        Conv2D(filters=32,
               kernel_size=(5, 5),
               padding='Same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               padding='Same',
               activation='relu'))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               padding='Same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256, activation="relu"))
    model.add(Dropout(rate=0.5))
    model.add(Dense(10, activation="softmax"))

    # Define the optimizer
    optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

    # Compile the model
    model.compile(optimizer=optimizer,
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    # Set learning rate decay (newer Keras releases report this metric as 'val_accuracy')
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
                                                patience=3,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)

    # Perform synthetic data augmentation
    data_generator = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
        zoom_range=0.1,  # randomly zoom image
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=False,  # randomly flip images horizontally
        vertical_flip=False  # randomly flip images vertically
    )

    data_generator.fit(train.X)

    history = model.fit_generator(
        data_generator.flow(train.X, train.Y, batch_size=cli.batch_size),
        epochs=cli.epochs,
        validation_data=(validation.X, validation.Y),
        verbose=2,
        steps_per_epoch=train.X.shape[0] // cli.batch_size,
        callbacks=[learning_rate_reduction])

    # plot loss and accuracy
    evaluation.plot_loss_and_accuracy(history)

    # Predict digits for the validation dataset
    prediction = DataSet()
    prediction.Y = model.predict(validation.X)
    prediction.X = validation.X

    prediction.convert_one_hot_vectors_to_digits()
    validation.convert_one_hot_vectors_to_digits()

    confusion_mtx = confusion_matrix(validation.Y, prediction.Y)
    evaluation.plot_confusion_matrix(confusion_mtx)

    # Predict results
    results = model.predict(test.X)
    results = convert_one_hot_vectors_to_digits(results)
    print(results)

    # Generate Submission file
    submission_file = Submission(results)
    submission_file.save()
Example No. 22
    def mine_bloc(self):
        """Create a new bloc and add open submissions to it."""
        global VOTE_WINDOW
        # update your ip (only if your publickey is registered) so that mining can be shared with all nodes
        if self.public_key is None:
            return None

        publickey = {"publickey": self.public_key}
        requests.post('https://blocbit.net/kitty.php', params=publickey)
        # Fetch the currently last bloc of the blocchain
        last_bloc = self.__chain[-1]
        #last_pf = last_bloc.proof
        #window = self.load_window_data()
        # Hash the last bloc (=> to be able to compare it to the stored hash
        # value)
        hashed_bloc = hash_bloc(last_bloc)
        proof = self.proof_by_vote()
        # Added to avoid a blocchain startup error after the genesis bloc, as it contains no submission (i.e. no zero)
        # last_pf = last_bloc.proof
        # if last_pf != 86400:
        #     zero = self.submission_zero()           
        # else:
        #     zero = 365.0
        zero = self.submission_zero()
        # Voters have the right to vote daily, so let's create a window submission
        # reward_submission = {
        #     'voter': 'STATION',
        #     'candidate': owner,
        #     'amount': 0 or 1
        # }
        Station_open = Submission(
            'STATION', self.public_key, zero, '', 1)
        Station_closed = Submission(
            'STATION', self.public_key, zero, '', 0)
        # Copy submission instead of manipulating the original
        # open_submissions list
        # This ensures that if for some reason the mining should fail,
        # we don't have the reward submission stored in the open submissions
        copied_submissions = self.__open_submissions[:]
        for tx in copied_submissions:
            if not Ballot.verify_submission(tx):
                return None
        
        # if global var is set to true award right and then set back to false
        if VOTE_WINDOW is False:
            copied_submissions.append(Station_closed)
        else:
            copied_submissions.append(Station_open)
            VOTE_WINDOW = False
        bloc = Bloc(len(self.__chain), hashed_bloc,
                      copied_submissions, proof)
        self.__chain.append(bloc)
        self.__open_submissions = []
        self.save_data()
        for node in self.__peer_nodes:
            url = 'http://{}/broadcast-bloc'.format(node)
            converted_bloc = bloc.__dict__.copy()
            converted_bloc['submissions'] = [
                tx.__dict__ for tx in converted_bloc['submissions']]
            try:
                response = requests.post(url, json={'bloc': converted_bloc})
                if response.status_code == 400 or response.status_code == 500:
                    print('Bloc declined, needs resolving')
                if response.status_code == 409:
                    self.resolve_conflicts = True
            except requests.exceptions.ConnectionError:
                continue
        return bloc
Example No. 23
def getTumblrUserLikedSubmissions(clientId,
                                  clientSecret,
                                  tokenId,
                                  tokenSecret,
                                  likeRequestLimit=100,
                                  requestOnlyNewCache=None):
    tumblrClient = pytumblr.TumblrRestClient(clientId, clientSecret, tokenId,
                                             tokenSecret)

    # This is an annoying limit the api seems to impose
    POSTS_PER_PAGE = 50

    oldestPageTimestamp = 0
    totalRequests = 0
    submissions = []

    foundOldSubmission = False

    while totalRequests < likeRequestLimit:
        if oldestPageTimestamp:
            tumblrLikes = tumblrClient.likes(**{
                'limit': POSTS_PER_PAGE,
                'offset': totalRequests
            })
        else:
            tumblrLikes = tumblrClient.likes(**{'limit': POSTS_PER_PAGE})

        numPostsThisPage = len(tumblrLikes['liked_posts'])

        if not numPostsThisPage:
            break

        logger.log(
            str(numPostsThisPage) + ' Tumblr likes requested. Total likes: ' +
            str(tumblrLikes['liked_count']))

        for postIndex, post in reversed(
                list(enumerate(tumblrLikes['liked_posts']))):
            if 'photos' in post:
                for photoIndex, photo in enumerate(post['photos']):
                    newSubmission = Submission()

                    newSubmission.source = u'Tumblr'

                    # Tumblr submissions don't have titles, so make one
                    # This'll look ugly in the file browser, unfortunately
                    if len(post['photos']) > 1:
                        newSubmission.title = str(
                            signedCrc32(post['short_url'].encode()))
                        newSubmission.title += u'_'
                        newSubmission.title += str(photoIndex)
                    else:
                        newSubmission.title = str(
                            signedCrc32(post['short_url'].encode()))
                    """logger.log(post)
					return"""
                    newSubmission.author = post['blog_name']

                    newSubmission.subreddit = post['short_url']
                    newSubmission.subredditTitle = post['blog_name'] + '_Tumblr'

                    newSubmission.body = post['caption']
                    newSubmission.bodyUrl = photo['original_size']['url']

                    newSubmission.postUrl = post['short_url']

                    submissions.append(newSubmission)

                    if (requestOnlyNewCache and requestOnlyNewCache[0]
                            and newSubmission.postUrl
                            == requestOnlyNewCache[0].postUrl):
                        logger.log(
                            'Found early out point after ' +
                            str(len(submissions)) + ' new submissions.'
                            ' If you e.g. changed your total requests value and want to go deeper, set'
                            ' Tumblr_Try_Request_Only_New to False in your settings.txt'
                        )
                        foundOldSubmission = True
                        break

            else:
                logger.log('Skipped ' + post['short_url'] +
                           ' (does not have images)')

            if foundOldSubmission:
                break

        if foundOldSubmission:
            break

        oldestPageTimestamp = tumblrLikes['liked_posts'][-1]['liked_timestamp']

        # If we didn't get a full page's worth of posts, we're on the last page
        # Sometimes pages don't have POSTS_PER_PAGE, they're a little under
        RANDOM_PAGE_TOLERANCE = 10
        if numPostsThisPage < POSTS_PER_PAGE - RANDOM_PAGE_TOLERANCE:
            break

        totalRequests += numPostsThisPage

    newEarlyOut = submissions[0] if len(submissions) else None
    return submissions, newEarlyOut
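A hypothetical driver for the function above (all four credentials are placeholders obtained from the Tumblr API console):

submissions, newestLike = getTumblrUserLikedSubmissions(
    'CONSUMER_KEY', 'CONSUMER_SECRET', 'OAUTH_TOKEN', 'OAUTH_SECRET',
    likeRequestLimit=200)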
Example No. 24
def import_old(path, as_convention=False, random_rsvp_users=0, submission_limit=sys.maxsize, timeslot_limit=sys.maxsize):
    
    if as_convention == True:
        convention = Convention()
        convention.name = 'Penguicon 2013'
        convention.url = '2013'
        convention.description = 'Penguicon 2013 schedule imported from schedule2013.html'
        convention.start_dt = datetime.datetime(year=2013, month=4, day=26, hour=16)
        convention.end_dt = datetime.datetime(year=2013, month=4, day=28, hour=16)
        convention.timeslot_duration = datetime.timedelta(hours=1)
        penguicontrax.db.session.add(convention)
        current_day = convention.start_dt.date()
        current_time = None

    existing_tags = {}
    for tag in Tag.query.all():
        existing_tags[tag.name] = tag
        
    existing_people = {}
    for person in Person.query.all():
        existing_people[person.name] = person

    existing_tracks = {}
    for track in Track.query.all():
        existing_tracks[track.name] = track

    existing_rooms = {}
    existing_submissions = []

    submission_count = 0
    with penguicontrax.app.open_resource(path, mode='r') as f:
        tree = ET.fromstring(f.read())
        events = tree.find('document')
        for section in events:
            if submission_count == submission_limit:
                break
            if as_convention == True and section.tag == 'time':
                time_text= section.text.split(' ')
                hour = int(time_text[0])
                if time_text[1] == 'PM' and hour != 12:
                    hour += 12
                elif time_text[1] == 'AM' and hour == 12:
                    hour = 0
                new_time = datetime.time(hour = hour)
                if current_time is not None and new_time.hour < current_time.hour:
                    current_day = current_day + datetime.timedelta(days=1)
                current_time = new_time                 
            elif section.tag == 'div' and section.attrib['class'] == 'section':
                name = section[0].text
                tag_list = section[1].text # Tag doesn't seem to be in the DB yet
                room = section[2].text
                person = section[3][0].text
                description = section[3][0].tail
                submission = Submission() if as_convention == False else Events()
                submission.title = name
                submission.description = description
                submission.duration = 1
                submission.setupTime = 0
                submission.repetition = 0
                submission.followUpState = 0
                submission.eventType = 'talk'
                #Load presenters
                submission.personPresenters= []
                for presenter in [presenter.strip() for presenter in person.split(',')]:
                    if presenter == 'Open':
                        continue #"Open" person will cause the schedule to become infesible
                    person = None
                    if not presenter in existing_people:
                        person = Person(presenter)
                        penguicontrax.db.session.add(person)
                        existing_people[presenter] = person
                    else:
                        person = existing_people[presenter]
                    submission.personPresenters.append(person)
                #Load Tags
                submission.tags = []
                for tag in tag_list.split(','):
                    tag = normalize_tag_name(tag)
                    db_tag = None
                    if not tag in existing_tags:
                        db_tag = Tag(tag, tag, True)
                        penguicontrax.db.session.add(db_tag)
                        existing_tags[tag] = db_tag
                    else:
                        db_tag = existing_tags[tag]
                    # Set track -- pick any tag that is also a track
                    if submission.track is None:
                        if tag in existing_tracks:
                            submission.track = existing_tracks[tag]
                    submission.tags.append(db_tag)
                #Load rooms
                if as_convention == True:
                    submission.convention = convention
                    db_room = None
                    if not room in existing_rooms:
                        db_room = Rooms()
                        db_room.room_name = room
                        db_room.convention = convention
                        penguicontrax.db.session.add(db_room)
                        existing_rooms[room] = db_room
                    else:
                        db_room = existing_rooms[room]
                    if current_day is not None and current_time is not None:
                        submission.rooms.append(db_room)
                        submission.start_dt = datetime.datetime(year=current_day.year, month=current_day.month, day=current_day.day,\
                            hour = current_time.hour, minute=current_time.minute)
                        submission.duration = 4 #1 hour
                existing_submissions.append(submission)
                penguicontrax.db.session.add(submission)
                submission_count = submission_count + 1
        penguicontrax.db.session.commit()

    if random_rsvp_users > 0:
        for user_index in range(random_rsvp_users):
            user = User()
            user.name = 'Random User %d' % user_index
            user.email = '*****@*****.**' % user_index
            user.public_rsvps = True
            user.staff = False
            user.special_tag = None
            user.superuser = False
            generate_account_name(user)
            gravatar_image_update(user)
            for rsvp_index in range(user.points):
                rand = random.randint(0, len(existing_submissions) - 1)
                while user in existing_submissions[rand].rsvped_by:
                    rand = random.randint(0, len(existing_submissions) - 1)
                existing_submissions[rand].rsvped_by.append(user)
            user.points = 0
            penguicontrax.db.session.add(user)
        penguicontrax.db.session.commit()
        
    if as_convention == True:
        from event import generate_schedule, generate_timeslots
        generate_timeslots(convention, timeslot_limit)
        all_rooms = list(existing_rooms.values())
        hackerspace = [existing_rooms['Hackerspace A'], existing_rooms['Hackerspace B']]
        food = [existing_rooms['Food']]
        from copy import copy
        general_rooms = copy(all_rooms)
        general_rooms.remove(hackerspace[0])
        general_rooms.remove(hackerspace[1])
        general_rooms.remove(food[0])
        timeslots = [timeslot for timeslot in convention.timeslots]
        for submission in existing_submissions:
            if food[0] in submission.rooms:
                submission.suitable_rooms = food
            elif hackerspace[0] in submission.rooms or hackerspace[1] in submission.rooms:
                submission.suitable_rooms = hackerspace
            else:
                submission.suitable_rooms = general_rooms
        for room in all_rooms:
            room.available_timeslots = timeslots
        generate_schedule(convention)
Example No. 25
def getPinterestUserPinnedSubmissions(email, username, password,
                                      cacheFileName):

    submissions = []

    lastIds = {} if not cacheFileName else loadPinterestCache(cacheFileName)
    updatedLastIds = lastIds

    pinterest = Pinterest(email=email,
                          password=password,
                          username=username,
                          cred_root='pinterest_creds')

    logger.log("Logging in to Pinterest...")
    pinterest.login()

    boards = pinterest.boards(username=username)

    for board in boards:
        # Get all pins for the board
        board_pins = []
        pin_batch = pinterest.board_feed(board_id=board['id'])

        while len(pin_batch) > 0:
            for pin in pin_batch:
                if pin['id'] not in lastIds:
                    # Only using the dict for its key lookup
                    updatedLastIds[pin['id']] = 1
                    board_pins.append(pin)

            pin_batch = pinterest.board_feed(board_id=board['id'])

        for pin in board_pins:

            # I'm not sure how important it is to support these
            if pin['type'] == 'story':
                continue

            newSubmission = Submission()
            newSubmission.source = u'Pinterest'
            # While pins do have titles, 90% of the time they seem useless
            newSubmission.title = pin['id']
            # There is probably a way to figure out who the original pinner is, but oh well
            newSubmission.author = 'N/A'
            newSubmission.subreddit = board['url']
            newSubmission.subredditTitle = board['name'] + '_Pinterest'
            if 'rich_summary' in pin and pin['rich_summary']:
                if 'display_description' in pin['rich_summary']:
                    newSubmission.body = pin['rich_summary'][
                        'display_description']
                else:
                    newSubmission.body = 'N/A'
                newSubmission.postUrl = pin['rich_summary']['url']

            # What is actually downloaded
            newSubmission.bodyUrl = pin['images']['orig']['url']
            submissions.append(newSubmission)

    if cacheFileName:
        savePinterestCache(cacheFileName, updatedLastIds)

    logger.log("Found {} new Pinterest submissions".format(len(submissions)))
    return submissions
Example No. 26
 def get_submission_by_id(self, submission_id: str):
     self.cursor.execute("SELECT * FROM submissions where id = ?",
                         (submission_id,))
     if db_response := self.cursor.fetchone():
         return Submission(*db_response)
Example No. 27
def getSubmissionsFromRedditList(redditList,
                                 source,
                                 earlyOutPoint=None,
                                 unlikeUnsave=False):
    submissions = []
    comments = []

    numTotalSubmissions = len(redditList)
    for currentSubmissionIndex, singleSubmission in enumerate(redditList):
        if currentSubmissionIndex and currentSubmissionIndex % 100 == 0:
            logger.log('Got {} submissions...'.format(currentSubmissionIndex))

        if type(singleSubmission) is praw.models.Submission:
            newSubmission = Submission()

            newSubmission.source = u'reddit'

            newSubmission.title = singleSubmission.title
            newSubmission.author = singleSubmission.author.name if singleSubmission.author else u'no_author'

            newSubmission.subreddit = singleSubmission.subreddit.url
            newSubmission.subredditTitle = singleSubmission.subreddit.title

            newSubmission.body = singleSubmission.selftext
            newSubmission.bodyUrl = singleSubmission.url

            newSubmission.postUrl = singleSubmission.permalink

            submissions.append(newSubmission)

            logger.log(
                percentageComplete(currentSubmissionIndex,
                                   numTotalSubmissions))

            if unlikeUnsave:
                if source == 'liked':
                    singleSubmission.clear_vote()
                else:
                    singleSubmission.unsave()

                logger.log('Unsaved/cleared vote on submission ' +
                           singleSubmission.permalink)

            # Check to see if we've already downloaded this submission; if so, early out
            if (earlyOutPoint and earlyOutPoint[0]
                    and newSubmission.postUrl == earlyOutPoint[0].postUrl):
                logger.log(
                    'Found early out point after ' + str(len(submissions)) +
                    ' new submissions.'
                    ' If you e.g. changed your total requests value and want to go deeper, set'
                    ' Reddit_Try_Request_Only_New to False in your settings.txt'
                )
                break

        # The submission is actually a saved comment
        else:
            # I looked at https://praw.readthedocs.io/en/latest/getting_started/quick_start.html
            #  very bottom to learn how to enumerate what information a submission can provide
            # logger.log(singleSubmission.body)
            # pprint.plogger.log(vars(singleSubmission))
            newSubmission = Submission()

            newSubmission.source = u'reddit'

            newSubmission.title = u'Comment on ' + singleSubmission.link_title
            newSubmission.author = singleSubmission.author.name if singleSubmission.author else u'no_author'

            newSubmission.subreddit = singleSubmission.subreddit.url
            newSubmission.subredditTitle = singleSubmission.subreddit.title

            newSubmission.body = singleSubmission.body
            newSubmission.bodyUrl = singleSubmission.permalink

            newSubmission.postUrl = singleSubmission.link_permalink

            comments.append(newSubmission)

    return submissions, comments
Example No. 28
 def get_recent_submissions(self, start_time_utc: int) -> list[Submission]:
     self.cursor.execute("SELECT * FROM submissions WHERE created_utc >= ?",
                         (start_time_utc,))
     # fetchall() returns an empty list when there are no matching rows,
     # so the annotated return type always holds
     return [Submission(*row) for row in self.cursor.fetchall()]
Example No. 29
    def submit_submission(self, link, description, assignment, grades):
        '''Create an instance of the Submission class and append it to the
        list of all submissions created by this Student instance.'''

        self.submissions.append(Submission(link, description, assignment, grades))
Example No. 30
from submission import Submission
from tweet import Tweet
from datetime import datetime
from time import sleep

while True:
    time = datetime.now()

    # Check if current time is within 2 minutes of the hour
    if time.minute <= 2 or time.minute >= 58:
        submission = Submission(['aww'])
        submission.generate()

        permalink = submission.permalink()
        url = submission.url()
        permalink_short = submission.permalink_short()

        tweet = Tweet(permalink, url, permalink_short)
        tweet.post()

    print('Trying again after {} minutes.\n'.format(60 - time.minute))
    sleep(60 * (60 - time.minute))