Code example #1
 def run(self):
     while True:
         urls = self.check(interval=5 * 60)
         filename = utils.generate_filename(self.room_id)
         c_filename = os.path.join(os.getcwd(), 'files', filename)
         self.record(urls[0], c_filename)
         self.print(self.room_id, '录制完成')  # '录制完成' = "recording finished"
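Example #1 (and the similar stream recorders below) calls a utils.generate_filename(room_id) helper that is never shown. A rough sketch of what it might look like, assuming a timestamp-based scheme (the .flv extension and name format are assumptions, not the project's actual code):

    import time

    def generate_filename(room_id):
        # Hypothetical: room id plus a wall-clock timestamp gives each
        # recording session a unique, sortable name.
        return '{}_{}.flv'.format(room_id, time.strftime('%Y-%m-%d_%H-%M-%S'))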
Code example #2
 def run(self) -> None:
     logging.basicConfig(
         level=utils.get_log_level(self.config),
         format='%(asctime)s %(thread)d %(threadName)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
         datefmt='%a, %d %b %Y %H:%M:%S',
         handlers=[
             logging.FileHandler(
                 os.path.join(
                     self.config['root']['logger']['log_path'],
                     "LiveRecoder_" + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '.log'),
                 "a",
                 encoding="utf-8")
         ])
     while True:
         try:
             if self.live_status:
                 urls = self.get_live_urls()
                 filename = utils.generate_filename(self.room_id)
                 c_filename = os.path.join(self.record_dir, filename)
                 self.record(urls[0], c_filename)
                 logging.info(self.generate_log('录制完成' + c_filename))  # '录制完成' = "recording finished"
             else:
                 logging.info(self.generate_log('下播了'))  # '下播了' = "stream ended"
                 break
         except Exception as e:
             logging.error(
                 self.generate_log('Error while checking or recording:' +
                                   str(e) + traceback.format_exc()))
Code example #3
def db_test():
    db = get_db()
    db.execute(
        "INSERT INTO IMAGE (NAME, LONGITUDE, LATITUDE) VALUES (?, ?, ?)",
        [generate_filename("test.jpg"), 15, 25])
    db.commit()
    return 'Insert Successful'
Code example #4
File: gtk_classes.py  Project: siadat/tix
 def save(self):
   buff = self.get_buffer()
   text = buff.get_text(buff.get_start_iter(), buff.get_end_iter())
   if not self.note:
     import utils
     self.note = Note(utils.generate_filename(), utils.user_configurations['TIXPATH'], text)
   self.note.write_text_to_file(text)
   return self.note
Code example #5
 def run(self):
     while True:
         try:
             self.print(self.room_id, '等待开播')  # '等待开播' = "waiting for the stream to start"
             urls = self.check(interval=self.check_interval)
             filename = utils.generate_filename(self.room_id)
             self.record(urls, filename)
             self.print(self.room_id, '录制完成' + filename)  # '录制完成' = "recording finished"
         except Exception as e:
             self.print(self.room_id,
                        'Error while checking or recording:' + str(e))
Code example #6
 def write_config(self):
     config_path = os.path.join("sqbs_configs",
                                generate_filename(self.email, ".json"))
     with open(config_path, 'w') as f:
         json.dump(
             {
                 "agg_id": self.aggregate_id,
                 "roster_id": self.roster_id,
                 "last_run": 0
             }, f)
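Here generate_filename(self.email, ".json") has to be deterministic: examples #15 and #24 below re-derive the same config path from the email alone. One plausible sketch (the hashing scheme is an assumption):

    import hashlib

    def generate_filename(email, suffix=""):
        # Hypothetical: hash the email so the name is filesystem-safe and the
        # same address always maps to the same config file.
        return hashlib.sha256(email.lower().encode("utf-8")).hexdigest() + suffix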
Code example #7
File: forms.py  Project: rogerhil/heybaldock
 def save(self):
     data = self.cleaned_data
     filename = generate_filename(data['image'].name)
     handler = ImageHandlerInstrument()
     handler.load(filename, data['image'])
     handler.save_thumbnails('PNG')
     instrument = Instrument.objects.create(
         name=data['name'],
         description=data.get('description'),
         image=filename
     )
     self.instance = instrument
Code example #8
 def run(self):
     while True:
         try:
             urls = self.check(interval=self.check_interval)
             filename = utils.generate_filename(self.room_id)
             utils.checkRecordDirExisted()
             c_filename = os.path.join(os.getcwd(), 'files', filename)
             self.record(urls[0], c_filename)
              self.print(self.room_id, '录制完成' + c_filename)  # '录制完成' = "recording finished"
         except Exception as e:
             self.print(self.room_id,
                        'Error while checking or recording:' + str(e))
Code example #9
File: image.py  Project: scitechindian/flask-picam
    def snap(self):
        """
        Snaps an image and saves it to /static/images/image-**timestamp**.jpg
        """
        filename = generate_filename("jpg")
        with picamera.PiCamera() as camera:
            camera.start_preview()
            time.sleep(2)
            camera.capture(filename)
            camera.stop_preview()

        return filename
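The docstrings pin down this helper's contract: generate_filename("jpg") should yield static/images/image-<timestamp>.jpg, and the video examples below call generate_filename('h264', 'videos') for static/videos/video-<timestamp>.h264. A sketch consistent with both call sites (the parameter names are assumptions):

    import os
    import time

    def generate_filename(extension, media="images"):
        # Hypothetical: "images" -> image-<ts>.jpg, "videos" -> video-<ts>.h264
        prefix = media.rstrip("s")
        name = "{}-{}.{}".format(prefix, int(time.time()), extension)
        return os.path.join("static", media, name)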
Code example #10
File: forms.py  Project: rogerhil/heybaldock
 def save(self):
     data = self.cleaned_data
     filename = generate_filename(data['file'].name)
     handler = FileHandlerDocument()
     handler.load(filename, data['file'])
     document = DocumentPlayerRepertoryItem(name=data['file'].name,
                     document=filename,
                     player_repertory_item_id=data['player_repertory_item'])
     if handler.is_image():
         handler.save_thumbnails('PNG')
         document.type = DocumentType.image
     document.save()
     return document
Code example #11
File: app.py  Project: FunkySayu/rawpaster
def add_content(key):

    key_folder = os.path.join(config.data_folder, key)
    if not os.path.exists(key_folder):
        logging.info("Folder %s does not exists ; creating.", key_folder)
        os.mkdir(key_folder)

    target_name = generate_filename(key)
    with open(os.path.join(key_folder, target_name), "w") as f:
        logging.info("User identified by key '%s' has pushed a file.", key)
        f.write(request.data.decode())

    return "/%s" % target_name
Code example #12
File: video.py  Project: scitechindian/flask-picam
    def shoot(self):
        """
        Shoots a video and saves it to /static/videos/video-**timestamp**.h264
        """
        filename = generate_filename('h264', 'videos')
        with picamera.PiCamera() as camera:
            camera.resolution = (640, 480)
            camera.start_preview()
            camera.start_recording(filename)
            camera.wait_recording(10)
            camera.stop_recording()
            camera.stop_preview()

        return filename
Code example #13
 def run(self):
     while True:
         try:
             urls = self.check(interval=self.check_interval)
             filename = utils.generate_filename()
             utils.checkRecordDirExisted()
             if not os.path.exists(os.path.join(self.out_dir, self.room_owner)):
                 os.makedirs(os.path.join(self.out_dir, self.room_owner))
             c_filename = os.path.join(self.out_dir, self.room_owner, filename)
             self.record(urls[0], c_filename)
              self.print(self.room_id, '录制完成' + c_filename)  # '录制完成' = "recording finished"
         except Exception as e:
             self.print(self.room_id,
                        'Error while checking or recording:' + str(e))
Code example #14
File: video.py  Project: edthix/flask-picam
    def shoot(self):
        """
        Shoots a video and saves it to /static/videos/video-**timestamp**.h264
        """
        filename = generate_filename('h264', 'videos')
        with picamera.PiCamera() as camera:
            camera.resolution = (640, 480)
            camera.start_preview()
            camera.start_recording(filename)
            camera.wait_recording(10)
            camera.stop_recording()
            camera.stop_preview()

        return filename
Code example #15
def validate_convert_args(args):
    err_dict = {
        "missing": "Missing {}.",
        "unauthorized": ("Your email is invalid or unauthorized! Make sure this "
                         "is the same email you used to create your scoresheet."),
        "rounds_min": ("First round number invalid - it must be a number no "
                       "greater than {}.".format(MAX_ROUNDS)),
        "rounds_max": ("Last round number invalid - it must be a number no "
                       "greater than {}.".format(MAX_ROUNDS)),
        "min_lt_max": "First round number can't be greater than last round number."
    }

    # check if any required arguments aren't present (after client-side validation)
    for check_var in ("email", "rounds_min", "rounds_max"):
        if check_var not in args:
            return {"error": err_dict["missing"].format(check_var)}

    # validate against pre-approved emails
    if not authorize_email(args["email"]):
        return {"error": err_dict["unauthorized"]}

    # check round min and round max are integers, in the correct range, and min <= max
    try:
        args["rounds_min"] = int(args["rounds_min"])
        assert 0 < args["rounds_min"] <= MAX_ROUNDS
    except (ValueError, TypeError, AssertionError):
        return {"error": err_dict["rounds_min"]}
    try:
        args["rounds_max"] = int(args["rounds_max"])
        assert 0 < args["rounds_max"] <= MAX_ROUNDS
    except (ValueError, TypeError, AssertionError):
        return {"error": err_dict["rounds_max"]}

    try:
        assert args["rounds_min"] <= args["rounds_max"]
    except AssertionError:
        return {"error": err_dict["min_lt_max"]}

    # if user has created sheets before, there'll be a sqbs config for it
    if os.path.isfile(
            os.path.join("sqbs_configs",
                         generate_filename(args["email"], ".json"))):
        return False

    return {"error": err_dict["unauthorized"]}
Code example #16
    def run(self):
        status = NOT_START
        c_filename = None
        while True:
            try:
                urls = self.check(interval=self.check_interval, blocking=False)

                if urls is None and status == NOT_START:  # just wait
                    self.print(self.room_id, '等待开播')  # '等待开播' = "waiting for the stream to start"
                    time.sleep(self.check_interval)

                if urls and status == NOT_START:  # start recording
                    status = self.next_status(status, True)
                    filename = utils.generate_filename(self.room_id)
                    c_filename = os.path.join(os.getcwd(), 'files', filename)
                    self.print(self.room_id, '开始录制' + c_filename)  # '开始录制' = "started recording"
                    self.record(urls[0], c_filename)

                if urls and status == RECORDING:  # recording
                    self.record(urls[0], c_filename)

                if urls is None and status == RECORDING:  # stream end
                    self.print(self.room_id, '录制完成' + c_filename)  # '录制完成' = "recording finished"
                    status = self.next_status(status, True)
                    subprocess.run(
                        'ffmpeg -y -i {c_filename} -c:a copy -c:v copy {final_name}'
                        .format(c_filename=c_filename,
                                final_name=c_filename.split('.')[0] +
                                '_final.mp4'),
                        shell=True,
                        stderr=subprocess.DEVNULL,
                        stdout=subprocess.DEVNULL)
                    print("ffmpeg done.")
                    try:
                        if callable(self.on_stop):
                            self.on_stop(
                                c_filename.split('.')[0] +
                                '_final.mp4')  # callback
                        elif self.on_stop is not None:
                            raise NotcallableError('on_stop is not callable')
                    except Exception as e:
                        self.print(
                            self.room_id,
                            'Error while calling on_stop callback' + str(e))

            except Exception as e:
                self.print(self.room_id,
                           'Error while checking or recording:' + str(e))
                status = self.next_status(status, False)
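One fragile spot above: c_filename.split('.')[0] truncates at the first dot anywhere in the path, not just at the extension, so a dotted directory name corrupts the remux target. os.path.splitext avoids that:

    import os

    c_filename = 'files/room.123/12345.flv'  # hypothetical path with a dotted directory
    # split('.')[0] would yield 'files/room' here; splitext keeps the directory intact.
    final_name = os.path.splitext(c_filename)[0] + '_final.mp4'
    print(final_name)  # files/room.123/12345_final.mp4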
Code example #17
 def run(self) -> None:
     while True:
         try:
             if self.live_status:
                 urls = self.get_live_urls()
                 filename = utils.generate_filename(self.room_id)
                 c_filename = os.path.join(self.record_dir, filename)
                 self.record(urls[0], c_filename)
                 logging.info(self.generate_log('录制完成' + c_filename))  # '录制完成' = "recording finished"
             else:
                 logging.info(self.generate_log('下播了'))  # '下播了' = "stream ended"
                 break
         except Exception as e:
             logging.error(
                 self.generate_log('Error while checking or recording:' +
                                   str(e)))
Code example #18
def upload_file():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser may
        # submit an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        elif file and allowed_file(file.filename):
            filename = generate_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect(url_for('uploaded_file', filename=filename))
    return '''
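In an upload handler like this, generate_filename usually has to both sanitize the client-supplied name and avoid collisions. A plausible sketch using werkzeug's secure_filename (combining it with a random token is an assumption, not this project's code):

    from uuid import uuid4
    from werkzeug.utils import secure_filename

    def generate_filename(original_name):
        # Hypothetical: strip path tricks from the user-supplied name and
        # prepend a random token so two uploads of "photo.jpg" cannot collide.
        return uuid4().hex + "_" + secure_filename(original_name)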
Code example #19
def api_generate_questions():
    user_id = request.headers.get('User-Id')
    if user_id is None:
        return utils.response(403, 'Unauthorized')

    json_data = request.get_json()
    if 'content' not in json_data or len(json_data['content']) < 20:
        return utils.response(400, 'Content is not valid')
    content = json_data['content'].strip()
    content_hash = hashlib.sha256(content.encode('utf-8')).hexdigest()[:32]

    article = mongo.db.user_articles.find_one({'id': content_hash})
    if article is not None:
        questions = list(
            mongo.db.user_questions.find({'article_id': content_hash}))
    else:
        article_id = content_hash
        article = {
            'id': article_id,
            'type': 'text',
            'created_time': datetime.datetime.utcnow(),
            'publish_time': datetime.datetime.utcnow(),
            'thumbnail': '',
            'title': '',
            'content': content
        }

        questions = generate_questions(content)
        for ques in questions:
            ques['id'] = utils.generate_filename()
            ques['article_id'] = article_id

        mongo.db.user_articles.insert_one(article)
        mongo.db.user_questions.insert_many(questions)

    num_sent = sum(1 for sent in content.split('.') if len(sent) > 20)
    max_count = min(num_sent, 10)
    questions = question_recommender.recommend(questions,
                                               user_id,
                                               max_count=max_count)
    # presumably a module-level helper that strips the listed keys from each
    # question, not the builtin filter (the argument order is reversed)
    questions = filter(questions, ['answer', 'explain'])
    article['questions'] = list(questions)

    return utils.response(200, 'Success', article)
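This zero-argument form (also used in examples #25 and #27 below) only needs to produce a unique token; here it doubles as a question id. A minimal sketch, assuming a UUID-based scheme:

    import uuid

    def generate_filename():
        # Hypothetical: a random, collision-resistant hex token.
        return uuid.uuid4().hex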
Code example #20
    def on_save_draft(self, action, parameter):
        """
        Save post data to the draft file
        """
        dialog = Gtk.FileChooserDialog(
            "Save draft", self.main_window, Gtk.FileChooserAction.SAVE,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_SAVE,
             Gtk.ResponseType.OK))

        save_button = dialog.get_header_bar().get_children()[1]
        save_button.get_style_context().add_class("suggested-action")

        # Add filters
        draft_filter = Gtk.FileFilter()
        draft_filter.set_name("Draft files")
        draft_filter.add_pattern("*.sbd")
        dialog.add_filter(draft_filter)

        any_filter = Gtk.FileFilter()
        any_filter.set_name("Any files")
        any_filter.add_pattern("*")
        dialog.add_filter(any_filter)

        post_title = self.main_window.title_entry.get_text()
        dialog.set_current_name(utils.generate_filename(post_title) + u".sbd")
        dialog.set_current_folder(os.path.expanduser("~"))

        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            filename = dialog.get_filename()
            with open(filename, "wb") as draft_file:
                draft_obj = {
                    "title": post_title,
                    "body": self.main_window.sourceview.get_buffer().props.text,
                    "tags": self.main_window.tag_entry.get_text()
                }
                pickle.dump(draft_obj, draft_file)
            content_label = self.main_window.infobar.get_content_area().get_children()[0]
            content_label.set_text("Draft was successfully saved")
            self.main_window.infobar.get_action_area().get_children()[1].props.visible = True
            self.main_window.infobar.show()
        dialog.destroy()
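Here utils.generate_filename(post_title) seeds the file chooser with a name derived from free-form text, so it presumably slugifies the title. A sketch under that assumption:

    import re

    def generate_filename(title):
        # Hypothetical slugifier: lowercase, collapse non-word runs to hyphens,
        # so "My first post!" becomes "my-first-post".
        slug = re.sub(r"[^\w]+", "-", title.strip().lower()).strip("-")
        return slug or "untitled"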
Code example #21
File: main.py  Project: tranHieuDev23/VSCD-Server
def speak_submit():
    try:
        authorId = request.form['authorId']
        recordings = []
        for item in request.files:
            if (item not in CLASSES):
                continue
            class_path = join(DATASET_DIRECTORY, item)
            if (not exists(class_path)):
                mkdir(class_path)
            file = request.files[item]
            filename = generate_filename(authorId, 'wav')
            filepath = join(class_path, filename)
            file.save(filepath)
            recordings.append((filepath, authorId, item))
        insert_recordings(recordings)
    except Exception:
        return ('', 400)
    return ('', 200)
Code example #22
    def on_save_draft(self, action, parameter):
        """
        Save post data to the draft file
        """
        dialog = Gtk.FileChooserDialog(
            "Save draft", self.main_window, Gtk.FileChooserAction.SAVE,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
             Gtk.STOCK_SAVE, Gtk.ResponseType.OK)
        )

        save_button = dialog.get_header_bar().get_children()[1]
        save_button.get_style_context().add_class("suggested-action")

        # Add filters
        draft_filter = Gtk.FileFilter()
        draft_filter.set_name("Draft files")
        draft_filter.add_pattern("*.sbd")
        dialog.add_filter(draft_filter)

        any_filter = Gtk.FileFilter()
        any_filter.set_name("Any files")
        any_filter.add_pattern("*")
        dialog.add_filter(any_filter)

        post_title = self.main_window.title_entry.get_text()
        dialog.set_current_name(utils.generate_filename(post_title) + u".sbd")
        dialog.set_current_folder(os.path.expanduser("~"))

        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            filename = dialog.get_filename()
            with open(filename, "wb") as draft_file:
                draft_obj = {
                    "title": post_title,
                    "body": self.main_window.sourceview.get_buffer().props.text,
                    "tags": self.main_window.tag_entry.get_text()
                }
                pickle.dump(draft_obj, draft_file)
            self.main_window.infobar.get_content_area().get_children()[0].set_text("Draft was successfully saved")
            self.main_window.infobar.get_action_area().get_children()[1].props.visible = True
            self.main_window.infobar.show()
        dialog.destroy()
Code example #23
 def run(self):
     try:
         while True:
             res = self.check()
             if isinstance(res, list):
                 self.fname = utils.generate_filename(
                     self.room_id, self.room_info['roomname'])
                 if self.capture_danmaku:
                     self.dmlogger = dmxml.BLiveXMLlogger(self.room_id, uid=0)
                     self.dmlogger.run(saving_path=os.path.join(
                         self.saving_path, self.fname + '.xml'))
                     self.lalogger = dmlar.BLiveLARlogger(self.room_id, uid=0)
                     self.lalogger.run(saving_path=os.path.join(
                         self.saving_path, self.fname + '.lar'))
                 self.stream_rec_thread = threading.Thread(
                     target=self.record, args=(res[0], ))
                 self.stream_rec_thread.start()
             if res:
                 if self.recording_lock.acquire(
                         timeout=self.check_interval):
                     self.stream_rec_thread.join()
                     if self.capture_danmaku:
                         self.dmlogger.terminate()
                         del self.dmlogger
                         self.lalogger.terminate()
                         del self.lalogger
                     self.recording_lock.release()
             else:
                 time.sleep(self.check_interval)
     except Exception as e:
         utils.print_log(self.room_id,
                         'Error while checking or recording:' + str(e))
     finally:
         if self.recording_lock.locked() and self.capture_danmaku:
             self.dmlogger.terminate()
             del self.dmlogger
             self.lalogger.terminate()
             del self.lalogger
             self.recording_lock.release()
Code example #24
def convert():
    req = {k: v.strip() for k, v in request.args.items()}

    invalid = validate_convert_args(req)
    if invalid:
        return json.dumps(invalid)

    filename = generate_filename(req["email"], ".json")
    full_filename = os.path.join("sqbs_configs", filename)

    d = {}
    with open(full_filename) as f:
        d = json.load(f)

    if int(time.time()) - d["last_run"] < CONVERSION_REPEAT_DELAY:
        return json.dumps({
            "error": "Please wait at least {} seconds in between submitting "
                     "sqbs conversion jobs".format(CONVERSION_REPEAT_DELAY +
                                                   CONVERSION_SCHEDULE_INTERVAL)
        })

    for item in sqbs_queue:
        if item[0] == full_filename:
            return json.dumps({
                "error": "You already have a job request for that aggregate sheet"
            })

    if not isinstance(req["rounds_min"], int) or not isinstance(
            req["rounds_max"], int):
        return json.dumps({"error": "Round numbers must be integers"})

    log.info(f"[{filename}] -- adding to SQBS queue -- {sqbs_queue}")
    sqbs_queue.append(
        (full_filename, int(req["rounds_min"]), int(req["rounds_max"])))
    with open(full_filename, "w") as f:
        d["email"] = req["email"]
        json.dump(d, f)
    return json.dumps({"success": req["email"]})
Code example #25
def doc_handler(msg):
    chat_id = msg.chat.id
    if msg.caption == "/get_song":
        doc = msg.document
        filename = doc.file_name
        file_id = doc.file_id
        real_filename = utils.generate_filename()
        if filename.split('.')[-1] != 'osz':
            bot.send_message(chat_id, "Wrong file extension")
            return
        if doc.file_size > 10485760:  # 10 MiB
            bot.send_message(chat_id, "File size is too large")
            return
        utils.get_file(file_id, f"temp/{real_filename}.zip")
        check = utils.get_song(real_filename)
        if check != "ok":
            bot.send_message(chat_id, "That's more than one song, idk which one you want...")
            utils.delete_temp_files(zip_filename=f"temp/{real_filename}.zip")
            return
        with open(f"temp/{real_filename}.mp3", 'rb') as mp3_file:
            bot.send_document(chat_id, mp3_file, timeout=40)
        utils.delete_temp_files(zip_filename=f"temp/{real_filename}.zip", mp3_filename=f"temp/{real_filename}.mp3")
    return
Code example #26
File: main.py  Project: markobogoevski/TextGeneration
def main(args):
    # Training settings
    parser = argparse.ArgumentParser(
        description='PyTorch Attentive RNN Language Modeling')
    parser.add_argument('--batch-size',
                        type=int,
                        default=8,
                        metavar='N',
                        help='input batch size for training (default: %(default)s)')
    parser.add_argument('--epochs',
                        type=int,
                        default=50,
                        metavar='N',
                        help='number of epochs to train (default: %(default)s)')
    parser.add_argument('--lr',
                        type=float,
                        default=30.0,
                        metavar='LR',
                        help='learning rate (default: 30.0)')
    parser.add_argument('--patience',
                        type=int,
                        default=5,
                        metavar='P',
                        help='patience for lr decrease (default: 5)')
    parser.add_argument('--seed',
                        type=int,
                        default=123,
                        metavar='S',
                        help='random seed (default: 123)')
    parser.add_argument('--log-interval',
                        type=int,
                        default=10,
                        metavar='N',
                        help='how many batches to wait before logging training status (default 10)')
    parser.add_argument('--dataset',
                        default='shakespeare',
                        const='shakespeare',
                        nargs='?',
                        choices=['wiki-02', 'ptb', 'shakespeare'],
                        help='Select which dataset (default: %(default)s)')

    parser.add_argument('--embedding-size',
                        type=int,
                        default=150,
                        metavar='N',
                        help='embedding size for embedding layer (default: %(default)s)')
    parser.add_argument('--n-layers',
                        type=int,
                        default=5,
                        metavar='N',
                        help='layer size for RNN encoder (default: %(default)s)')
    parser.add_argument('--hidden-size',
                        type=int,
                        default=150,
                        metavar='N',
                        help='hidden size for RNN encoder (default: %(default)s)')
    parser.add_argument('--positioning-embedding',
                        type=int,
                        default=150,
                        metavar='N',
                        help='hidden size for positioning generator (default: %(default)s)')
    parser.add_argument('--input-dropout',
                        type=float,
                        default=0.5,
                        metavar='D',
                        help='input dropout (default: 0.5)')
    parser.add_argument('--rnn-dropout',
                        type=float,
                        default=0.0,
                        metavar='D',
                        help='rnn dropout (default: 0.0)')
    parser.add_argument('--decoder-dropout',
                        type=float,
                        default=0.5,
                        metavar='D',
                        help='decoder dropout (default: 0.5)')
    parser.add_argument(
        '--clip',
        type=float,
        default=0.25,
        metavar='N',
        help='value at which to clip the norm of gradients (default: 0.25)')

    parser.add_argument('--optim',
                        default='sgd',
                        const='sgd',
                        nargs='?',
                        choices=['sgd', 'adam', 'asgd'],
                        help='Select which optimizer (default: %(default)s)')

    parser.add_argument(
        '--salton-lr-schedule',
        help='Enables same training schedule as Salton et al. 2017 (default: False)',
        action='store_true')

    parser.add_argument('--early-stopping-patience',
                        type=int,
                        default=35,
                        metavar='P',
                        help='early stopping patience (default: %(default)s)')

    parser.add_argument('--attention',
                        help='Enable standard attention (default: False)',
                        action='store_true')

    parser.add_argument('--no-positional-attention',
                        help='Disable positional attention (default: False)',
                        action='store_false')

    parser.add_argument(
        '--tie-weights',
        help='Tie embedding and decoder weights (default: False)',
        action='store_true')

    parser.add_argument(
        '--file-name',
        action="store",
        help='Specific filename to save under (default: uses params to generate)',
        default=False)

    parser.add_argument('--parallel',
                        help='Enable using GPUs in parallel (default: False)',
                        action='store_true')

    args = parser.parse_args(args)

    if not args.file_name:
        args.file_name = generate_filename(args)

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    writer = SummaryWriter('runs/' + args.file_name)

    train_iter, valid_iter, test_iter, vocab = get_dataset(
        dataset=args.dataset, batch_size=args.batch_size, device=device)

    args.vocab_size = len(vocab)

    model = get_model(args)

    if torch.cuda.device_count() > 1 and args.parallel:
        print("Using", torch.cuda.device_count(), "GPUs")
        model = nn.DataParallel(model)
    else:
        if args.parallel:
            warnings.warn(
                "Passed in parallel flag but torch unable to detect multiple GPUs"
            )
        args.parallel = False

    model.to(device)
    print(str(model))
    print(count_parameters(model))

    # Training Set Up
    if args.optim == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              weight_decay=12e-7)
    if args.optim == 'asgd':
        optimizer = optim.ASGD(model.parameters(),
                               lr=args.lr,
                               weight_decay=12e-7)
    if args.optim == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               betas=(0.0, 0.999),
                               eps=1e-8,
                               weight_decay=12e-7,
                               amsgrad=True)

    criterion = nn.CrossEntropyLoss(ignore_index=0, reduction='sum')

    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     mode='min',
                                                     patience=args.patience,
                                                     verbose=True,
                                                     factor=0.5)

    early_stopping_counter = 0
    best_val_loss = False

    # At any point you can hit Ctrl + C to break out of training early.
    try:
        # Loop over epochs.
        for epoch in range(1, args.epochs + 1):

            if args.salton_lr_schedule:
                current_learning_rate = 0.5**max(epoch - 12, 0.0)
                optimizer.param_groups[0]['lr'] = current_learning_rate

            epoch_start_time = time.time()

            train(args, model, train_iter, valid_iter, criterion, optimizer,
                  epoch, writer)

            if args.parallel:
                with open('models/temp.pt', 'wb') as fw:
                    torch.save(model.module.state_dict(), fw)

                with open('models/temp.pt', 'rb') as fr:
                    single_gpu_model = get_model(args)
                    # load on either single gpu or on the cpu (if gpu not avail)
                    single_gpu_model.load_state_dict(
                        torch.load(fr, map_location=device))
                    single_gpu_model.to(device)

                val_loss = evaluate(args,
                                    single_gpu_model,
                                    valid_iter,
                                    criterion,
                                    save_attention=True,
                                    epoch=epoch,
                                    vocabulary=vocab)
                del single_gpu_model

            else:
                val_loss = evaluate(args,
                                    model,
                                    valid_iter,
                                    criterion,
                                    save_attention=True,
                                    epoch=epoch,
                                    vocabulary=vocab)

            test_loss = evaluate(args, model, test_iter, criterion)

            # possibly update learning rate
            scheduler.step(val_loss)

            # track learning rate
            writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)

            writer.add_scalar('validation_loss_at_epoch', val_loss, epoch)
            writer.add_scalar('test_loss_at_epoch', test_loss, epoch)

            writer.add_scalar('validation_perplexity_at_epoch',
                              min(math.exp(min(val_loss, 7)), 1000), epoch)

            writer.add_scalar('test_perplexity_at_epoch',
                              min(math.exp(min(test_loss, 7)), 1000), epoch)

            print('-' * 89)
            print(
                '| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                'valid ppl {:8.2f}'.format(epoch,
                                           (time.time() - epoch_start_time),
                                           val_loss, math.exp(min(val_loss,
                                                                  7))))
            print('-' * 89)
            # Save the model if the validation loss is the best we've seen so far.
            if not best_val_loss or val_loss < best_val_loss:
                if not os.path.exists('models'):
                    os.makedirs('models')

                with open('models/{}.pt'.format(args.file_name), 'wb') as f:
                    if args.parallel:
                        torch.save(model.module.state_dict(), f)
                    else:
                        torch.save(model.state_dict(), f)

                best_val_loss = val_loss
                early_stopping_counter = 0
            else:
                early_stopping_counter += 1

            writer.add_scalar('best_validation_perplexity_at_epoch',
                              min(math.exp(min(best_val_loss, 7)), 1000),
                              epoch)
            if early_stopping_counter >= args.early_stopping_patience:
                print("Validation loss has not improved for {}".format(
                    early_stopping_counter))
                print("Ending Training early at epoch {}".format(epoch))
                break

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early')

    if os.path.exists('models/{}.pt'.format(args.file_name)):
        # Load the best saved model.
        with open('models/{}.pt'.format(args.file_name), 'rb') as f:
            # instantiate model
            model = get_model(args)
            # load on either single gpu or on the cpu (if gpu not avail)
            model.load_state_dict(torch.load(f, map_location=device))
            model.to(device)
            # after load the rnn params are not a continuous chunk of memory
            # this makes them a continuous chunk, and will speed up forward pass
            model.flatten_parameters()

        # Run on test data.
        test_loss = evaluate(args, model, test_iter, criterion)
        print('=' * 89)
        print(
            '| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
                test_loss, math.exp(test_loss)))
        print('=' * 89)
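The --file-name help says the default 'uses params to generate', so generate_filename(args) presumably encodes the run's key hyperparameters into the checkpoint and run names. A sketch under that assumption (the choice of fields is hypothetical):

    def generate_filename(args):
        # Hypothetical: one name per configuration, reused for both
        # models/<name>.pt and runs/<name> above.
        return "{}_l{}_h{}_e{}_lr{}".format(
            args.dataset, args.n_layers, args.hidden_size,
            args.embedding_size, args.lr)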
Code example #27
        total_loss_sum += info[0]
        ctr_sum += 1
        total_ctr += info[1]
        if (i + 1) % global_data_print_freq == 0:
            write_file_and_close(global_output_filename,
                "epoch: {:d}, "
                "train set index: {:d}, "
                "average loss: {:.10f}"
                .format(epoch, i, running_loss_sum / ctr_sum)
            )
            running_loss_sum = 0.0
            ctr_sum = 0
        it = it + 1
    write_file_and_close(global_output_filename,
        "Epoch {:d} finished, average loss: {:.10f}"
        .format(epoch, total_loss_sum / total_ctr)
    )
    if (epoch + 1) % global_epoch_test_freq == 0:
        write_file_and_close(global_output_filename, "Starting testing")
        info = [0., 0., 0.]
        test(info)
        write_file_and_close(global_output_filename,
            "Correct: {:d}, total: {:d}, "
            "accuracy: {:.10f}, average loss: {:.10f}"
            .format(info[0], info[1], info[0] / info[1], info[2] / info[1])
        )
        write_file_and_close(global_output_filename, "Finished testing")

model_filename = generate_filename()
torch.save(net, model_filename)
Code example #28
def run(
    experiment_class='Experiment',
    name='',
    batch_size=100,
    max_iters=10000,
    filepath='experiments',
    random_seed=0,
    parallel=False,
    verbose=False,
):
    print(experiment_class, name, batch_size, max_iters, random_seed)
    #Task
    meta_selector = BudgetMetaSelector()
    env = gym.make('LQG1D-v0')
    R = (env.Q * env.max_pos**2 + env.R * env.max_action**2).item()  # np.asscalar is deprecated; .item() is its replacement
    M = env.max_pos
    gamma = env.gamma
    H = env.horizon
    tp = TaskProp(gamma, H, -env.max_action, env.max_action, R, M,
                  -env.max_pos, env.max_pos, 2 * env.max_action)
    local = True

    #Policy
    theta_0 = -0.1
    w = math.log(1)  #math.log(env.sigma_controller)
    pol = ExpGaussPolicy(theta_0, w)

    #Features
    feature_fun = utils.identity

    #Constraints
    constr = OptConstr(delta=0.2,
                       N_min=batch_size,
                       N_max=500000,
                       N_tot=30000000,
                       max_iter=max_iters,
                       approximate_gradients=True)

    #Evaluation of expected performance
    def evaluate(pol, deterministic=False):
        var = 0 if deterministic else pol.cov
        return env.computeJ(pol.theta_mat, var)

    #Run
    # exp = adaptive_exploration.Experiment(env, tp, grad_estimator, meta_selector, constr, feature_fun, evaluate, name=name)
    # exp = adaptive_exploration.CollectDataExperiment(env, tp, grad_estimator, meta_selector, constr, feature_fun, evaluate, name=name)
    # exp = adaptive_exploration.SafeExperiment(env, tp, grad_estimator, meta_selector, constr, feature_fun, evaluate, name=name, random_seed=random_seed)

    experiment = AVAILABLE_EXPERIMENTS[experiment_class]
    exp = experiment(env,
                     tp,
                     meta_selector,
                     constr,
                     feature_fun,
                     evaluate=evaluate,
                     name=name,
                     random_seed=random_seed)

    exp.run(pol,
            local,
            parallel,
            verbose=verbose,
            filename=os.path.join(filepath, name + utils.generate_filename()))
Code example #29
    def run(self):
        while True:
            try:
                status, info = self.check(interval=self.check_interval)
                if not status:
                    self.print(room_id=self.room_id,
                               content='Not Broadcasting...')
                    time.sleep(self.check_interval)
                    continue

                self.print(room_id=self.room_id, content='Start Broadcasting!')
                self.inform(text=f"{self.room_id}开播了", desp=info['roomname'])

                # time.sleep(1.5)
                # status, urls = self.get_live_urls_byapi()
                # url = urls[0]
                # print(urls)
                # print("url:" + url)
                # path = urlparse(url).path
                # ext = str(os.path.splitext(path)[1])
                # print(ext)
                # Directly use get_live_urls_byweb, prevent m3u8sucks

                time.sleep(1)
                status, urls = self.get_live_urls_byweb()
                if not status:
                    self.print("Error! Getting play_url From Web Failed.")
                    self.inform(
                        text=
                        f"Error! Getting play_url From Web Failed. Room:{self.room_id}",
                        desp=info['roomname'])
                    time.sleep(3)
                    continue
                print(urls)
                ext = str(os.path.splitext(urlparse(urls[0]).path)[1])
                print(ext)

                # generate file name
                filename = utils.generate_filename(self.room_id)
                c_filename = os.path.join(os.getcwd(), 'files', filename)

                if self.use_option_link:
                    # If you use this tool overseas, you may want to set
                    # use_option_link to True and use a proxy: the last url in
                    # urls is generally available to overseas users.
                    url = urls[len(urls) - 1]
                else:
                    # The first url is generally available to domestic users.
                    url = urls[0]

                if ext == '.flv':
                    self.flvrecord(url, c_filename + ".flv")
                elif ext == '.m3u8':
                    self.print(
                        f"Warning! Recording in m3u8 mode, maybe the record file is not complete."
                    )
                    self.inform(
                        text=
                        f"Warning! Recording in m3u8 mode, maybe the record file is not complete. Room:{self.room_id}",
                        desp=info['roomname'])
                    self.m3u8record(url, c_filename + ".mp4")

                self.print(self.room_id, '录制完成' + c_filename)  # '录制完成' = "recording finished"
                self.inform(text=f"{self.room_id}录制结束", desp="")  # '录制结束' = "recording ended"
            except Exception as e:
                self.print(self.room_id,
                           'Error while checking or recording:' + str(e))
                traceback.print_exc()
                self.inform(text=f"Error!", desp=str(e))
Code example #30
    def run(
            self,
            policy,
            use_local_stats=False,  # Update task prop only with local stats
            parallel=True,
            filename=generate_filename(),  # NB: default computed once, at function definition time
            verbose=False):

        self.use_local_stats = True
        self.initial_configuration = self.get_param_list(locals())
        self.estimator = Estimators(self.task_prop, self.constr)

        N = N_old = self.constr.N_min  # Total number of trajectories to take in this iteration
        N1, N2, N3 = self.split_trajectory_count(N)

        #Multiprocessing preparation
        if parallel:
            self._enable_parallel()

        # COMPUTE BASELINES
        features, actions, rewards, prevJ, gradients = self.get_trajectories_data(
            policy, N, parallel=parallel)
        prevJ_det = self.estimate_policy_performance(policy,
                                                     N,
                                                     parallel=parallel,
                                                     deterministic=True,
                                                     get_min=False)

        #Learning
        iteration = 0
        N_tot = 0
        start_time = time.time()
        J_hat = prevJ
        J_journey = (2 * prevJ + prevJ_det) / 3

        while iteration < self.constr.max_iter:
            iteration += 1
            J_det_exact = self.evaluate(policy, deterministic=True)
            self.make_checkpoint(locals())  # CHECKPOINT BEFORE SIGMA STEP

            J_journey = 0

            # PRINT
            if verbose:
                if iteration % 50 == 1:
                    print(
                        'IT\tN\t\tJ\t\t\tJ_DET\t\t\tTHETA\t\tSIGMA\t\t\tBUDGET'
                    )
                print(iteration, '\t', N, '\t', J_hat, '\t', prevJ_det, '\t',
                      policy.get_theta(), '\t', policy.sigma, '\t',
                      self.budget / N, '\t',
                      time.time() - start_time)

            start_time = time.time()

            # PERFORM FIRST STEP
            features, actions, rewards, J_hat, gradients = self.get_trajectories_data(
                policy, N1, parallel=parallel)
            J_journey += J_hat * N1

            if iteration > 1:
                # B += J(theta, sigma') - J(theta, sigma)
                self.budget += N3 * (J_hat - prevJ)
                prevJ = J_hat

            alpha, N1, safe = self.meta_selector.select_alpha(
                policy, gradients, self.task_prop, N1, iteration, self.budget)
            policy.update(alpha * gradients['grad_theta'])

            # PERFORM SECOND STEP
            newJ_det = self.estimate_policy_performance(policy,
                                                        N2,
                                                        parallel=parallel,
                                                        deterministic=True)
            J_journey += newJ_det * N2

            # B += J(theta', 0) - J(theta, 0)
            self.budget += N2 * (newJ_det - prevJ_det)

            prevJ_det = newJ_det

            # PERFORM THIRD STEP
            features, actions, rewards, J_hat, gradients = self.get_trajectories_data(
                policy, N3, parallel=parallel)
            J_journey += J_hat * N3

            # B += J(theta', sigma) - J(theta, sigma)
            self.budget += N1 * (J_hat - prevJ)
            prevJ = J_hat

            beta, N3, safe = self.meta_selector.select_beta(
                policy, gradients, self.task_prop, N3, iteration, self.budget)
            policy.update_w(beta * gradients['gradDeltaW'])

            N_old = N
            J_journey /= N
            #Check if done
            N_tot += N
            if N_tot >= self.constr.N_tot:
                print('Total N reached')
                print('End experiment')
                break

        # SAVE DATA

        self.save_data(filename)
Code example #31
    def run(
            self,
            policy,
            use_local_stats=False,  # Update task prop only with local stats
            parallel=True,
            filename=generate_filename(),  # NB: default computed once, at function definition time
            verbose=False):

        self.use_local_stats = True
        self.initial_configuration = self.get_param_list(locals())
        self.estimator = Estimators(self.task_prop, self.constr)

        N = N_old = self.constr.N_min  # Total number of trajectories to take in this iteration
        N1, N2, N3 = self.split_trajectory_count(N)

        #Multiprocessing preparation
        if parallel:
            self._enable_parallel()

        # COMPUTE BASELINES
        features, actions, rewards, prevJ, gradients = self.get_trajectories_data(
            policy, N, parallel=parallel)
        #prevJ_det = self.estimate_policy_performance(policy, N, parallel=parallel, deterministic=True, get_min=False)
        prevJ_det = prevJ

        #Learning
        iteration = 0
        N_tot = 0
        start_time = time.time()
        J_hat = prevJ
        J_journey = (2 * prevJ + prevJ_det) / 3
        policy_low_variance = policy

        while iteration < self.constr.max_iter:
            iteration += 1
            J_det_exact = self.env.computeJ(policy.theta_mat, 0)
            self.make_checkpoint(locals())  # CHECKPOINT BEFORE SIGMA STEP

            J_journey = 0

            # PRINT
            if verbose:
                if iteration % 50 == 1:
                    print(
                        'IT\tN\t\tJ\t\t\tJ_DET\t\t\tTHETA\t\tSIGMA\t\t\tBUDGET'
                    )
                print(iteration, '\t', N, '\t', J_hat, '\t', prevJ_det, '\t',
                      policy.get_theta(), '\t', policy.sigma, '\t',
                      self.budget / N, '\t',
                      time.time() - start_time)

            start_time = time.time()

            # PERFORM FIRST STEP
            features, actions, rewards, J_hat, gradients = self.get_trajectories_data(
                policy, N1, parallel=parallel)
            J_journey += J_hat * N1

            if iteration > 1:
                # B += J(theta, sigma') - J(theta, sigma)
                self.budget += N3 * (J_hat - prevJ)
                prevJ = J_hat

            alpha, N1, safe = self.meta_selector.select_alpha(
                policy, gradients, self.task_prop, N1, iteration, self.budget)
            policy.update(alpha * gradients['grad_theta'])

            # PERFORM THIRD STEP
            features, actions, rewards, J_hat, gradients = self.get_trajectories_data(
                policy, N3, parallel=parallel)
            J_journey += J_hat * N3

            # B += J(theta', sigma) - J(theta, sigma)
            self.budget += N1 * (J_hat - prevJ)
            prevJ = J_hat

            beta, N3, safe = self.meta_selector.select_beta(
                policy, gradients, self.task_prop, N3, iteration, self.budget)
            # policy.update_w(beta * gradients['gradDeltaW'])

            # COMPUTE OPTIMAL SIGMA_0 USING THE BOUND
            d = policy.penaltyCoeffSigma(self.task_prop.R, self.task_prop.M,
                                         self.task_prop.gamma,
                                         self.task_prop.volume)
            if self.budget / N2 >= -(gradients['grad_w']**2) / (4 * d):
                disc = math.sqrt(1 - (4 * d * (-self.budget / N2)) /
                                 (gradients['grad_w']**2))
                beta_minus = (1 - disc) / (2 * d)
                beta_plus = (1 + disc) / (2 * d)
                w_det = min(policy.w + beta_minus * gradients['grad_w'],
                            policy.w + beta_plus * gradients['grad_w'])
                policy_low_variance = policies.ExpGaussPolicy(
                    np.copy(policy.theta_mat), w_det)
            else:
                policy_low_variance = policy

            newJ_det = self.estimate_policy_performance(policy_low_variance,
                                                        N2,
                                                        parallel=parallel)
            J_journey += newJ_det * N2

            # B += J(theta', 0) - J(theta, 0)
            self.budget += N2 * (newJ_det - prevJ_det)

            prevJ_det = newJ_det

            # beta, N3, safe = self.meta_selector.select_beta(policy, gradients, self.task_prop, N3, iteration, self.budget)
            policy.update_w(beta * gradients['gradDeltaW'])

            N_old = N
            J_journey /= N
            #Check if done
            N_tot += N
            if N_tot >= self.constr.N_tot:
                print('Total N reached')
                print('End experiment')
                break

            # def signal_handler(signal, frame):
            #     self.save_data(filename)
            #     sys.exit(0)
            #
            # #Manual stop
            # signal.signal(signal.SIGINT, signal_handler)

        # SAVE DATA

        self.save_data(filename)
Code example #32
    def run(self,
            policy,
            use_local_stats=False,
            parallel=True,
            filename=generate_filename(),  # NB: default computed once, at function definition time
            verbose=False):
        self.use_local_stats = True
        self.initial_configuration = self.get_param_list(locals())
        self.estimator = Estimators(self.task_prop, self.constr)

        N = self.constr.N_min  # Total number of trajectories to take in this iteration

        #Multiprocessing preparation
        if parallel:
            self._enable_parallel()

        # COMPUTE BASELINES
        features, actions, rewards, prevJ, gradients = self.get_trajectories_data(
            policy, N, parallel=parallel)
        J_baseline = prevJ

        #Learning
        iteration = 0
        N_tot = 0
        J_journey = prevJ
        start_time = time.time()

        N1 = N // 2
        N2 = N - N1

        while iteration < self.constr.max_iter:
            iteration += 1
            J_det_exact = self.evaluate(policy, deterministic=True)
            self.make_checkpoint(locals())  # CHECKPOINT BEFORE SIGMA STEP

            J_journey = 0

            # PRINT
            if verbose:
                if iteration % 50 == 1:
                    print(
                        'IT\tN\t\tJ\t\t\tJ_DET\t\t\tTHETA\t\tSIGMA\t\t\tBUDGET'
                    )
                print(iteration, '\t', N, '\t', prevJ, '\t', 0, '\t',
                      policy.get_theta(), '\t', policy.sigma, '\t',
                      self.budget / N, '\t',
                      time.time() - start_time)

            start_time = time.time()

            # PERFORM FIRST STEP
            features, actions, rewards, prevJ, gradients = self.get_trajectories_data(
                policy, N1, parallel=parallel)
            J_journey += prevJ * N1

            budget = prevJ - J_baseline
            alpha, N1, safe = self.meta_selector.select_alpha(policy,
                                                              gradients,
                                                              self.task_prop,
                                                              N1,
                                                              iteration,
                                                              budget=budget)
            policy.update(alpha * gradients['grad_theta'])

            # PERFORM SECOND STEP
            features, actions, rewards, prevJ, gradients = self.get_trajectories_data(
                policy, N2, parallel=parallel)
            J_journey += prevJ * N2

            budget = prevJ - J_baseline
            beta, _, _ = self.meta_selector.select_beta(policy,
                                                        gradients,
                                                        self.task_prop,
                                                        N2,
                                                        iteration,
                                                        budget=budget)
            policy.update_w(beta * gradients['gradDeltaW'])

            J_journey /= N
            N_tot += N
            if N_tot >= self.constr.N_tot:
                print('Total N reached\nEnd experiment')
                break

        # SAVE DATA

        self.save_data(filename)
Code example #33
def biz():
    csv_fname = generate_filename('ALL_BUSINESSES') + '.csv'
    limit = 20
    offset = 0
    pagination = True
    nxt_url = None

    while pagination:
        print('---------------------------------START: BUSINESS API GET: OFFSET {}'.format(offset))
        api_url = FIVESTARS_BUSINESS_GROUP_OFFSET_API.format(limit, offset) if not nxt_url else nxt_url
        resp = request(api_url)
        if resp:
            jsn = resp.json()

            nxt_url = jsn.get('meta').get('next')
            if not nxt_url:
                pagination = False

            data = jsn.get('data')
            if data:
                for biz_group in data:
                    total_biz = biz_group.get('total_businesses')
                    bizs = biz_group.get('businesses')
                    # print('bizs', bizs)
                    if bizs:
                        fstars = FiveStars(bizs)
                        businesses = fstars.get_businesses()
                        for biz in businesses:
                            name = biz.get('name')

                            addr = biz.get('address')
                            street = addr.get('street')
                            city = addr.get('city')
                            state = addr.get('state')
                            zipcode = addr.get('postal_code')

                            phone = biz.get('phone')
                            if phone and len(phone) == 10:
                                phone = '(' + phone[:3] + ') ' + phone[3:]  # keep all seven remaining digits

                            total_locs = total_biz
                            website = biz.get('website')
                            fb = biz.get('facebook')
                            instagram = biz.get('instagram')
                            tw = biz.get('twitter')
                            yelp = biz.get('yelp')
                            prof = biz.get('profile')

                            desc = biz.get('description')
                            hours = biz.get('hours')
                            keywords = biz.get('keywords')
                            logo = biz.get('logo')
                            gplus = biz.get('google_plus')
                            cat = biz.get('category')
                            write_csv(csv_fname, [[
                                name,
                                street,
                                city,
                                state,
                                zipcode,
                                phone,
                                total_locs,
                                website,
                                fb,
                                instagram,
                                tw,
                                yelp,
                                prof,
                                desc,
                                hours,
                                keywords,
                                logo,
                                gplus,
                                cat
                            ]], 'output')
                            print('[+] Done >> {}'.format(name))
        else:
            # A failed request ends the crawl instead of looping forever.
            pagination = False
        offset += limit
        print('---------------------------------END: BUSINESS API GET: OFFSET {}'.format(offset))
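generate_filename itself is imported from a project-local utils module and is not
shown in any of these examples. Judging from call sites such as
generate_filename('ALL_BUSINESSES') + '.csv', it most likely joins the given prefix
with a timestamp; a minimal sketch under that assumption (not the project's actual
implementation):

import datetime

def generate_filename(prefix):
    # Sketch only: combine the caller's prefix with a timestamp so repeated
    # runs never overwrite each other's output files.
    stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    return '{}_{}'.format(prefix, stamp)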
Code example #34
File: main.py  Project: pantuts/scrapers
def scrape(division=False, csv_file=None, newcol=False):
    csv_fname = generate_filename('MAXPREPS') + '.csv'
    cookies = get_cookies()
    s = requests.Session()

    done_text = 'We do not have rankings for this selection, please try another search'

    if not division:
        pagination = True
        cur_page = 1
        while pagination:
            soup = request(s, RANKINGS_URL.format(cur_page), cookies)
            if soup:
                if done_text in str(soup):
                    break
                else:
                    print(
                        '-----------------------Page {}-----------------------'
                        .format(cur_page))
                    cur_page = cur_page + 1

                    try:
                        trs = soup.select('table tr')[1:]
                    except Exception:
                        trs = []

                    for tr in trs:
                        rp = RowParser(tr)
                        name = rp.get_name()
                        rating = rp.get_rating()
                        strength = rp.get_strength()
                        p_url = rp.get_profle_url()
                        s_num = rp.get_search_num()

                        prof_soup = request(s, p_url, cookies)
                        if prof_soup:
                            pp = ProfileParser(s, cookies, prof_soup, s_num,
                                               name, rating, strength, p_url)
                            data = pp.get_data()
                            write_csv(csv_fname, data)
                            print('[+] Done >> {}'.format(name))
    else:
        # This branch only appends the state class / rank columns to an existing CSV.
        if newcol:
            with open(csv_file, 'r') as csvfile:
                reader = csv.reader(csvfile)
                for row in reader:
                    name = row[5]
                    url = row[4]
                    soup = request(s, url, cookies)
                    if soup:
                        fp = FootballProfile(name, soup)
                        data = [[
                            row[0], row[1], row[2], row[3], row[4], row[5],
                            row[6], row[7], row[8], row[9], row[10], row[11],
                            row[12], row[13], row[14], row[15], row[16],
                            row[17], row[18],
                            fp.state_class_division_name(),
                            fp.state_division_rank()
                        ]]
                        write_csv('__' + csv_file, data)
                        print('[+] Done >> {}'.format(name))
        else:
            for div in DIVISIONS:
                print(
                    '-----------------------DIVISION: {}-----------------------'
                    .format(div))
                state_soup = request(s, RANKINGS_BY_DIVISION.format(1, div),
                                     cookies)
                if state_soup:
                    state_class = get_state_class(state_soup)
                    if state_class:
                        for sc in state_class:
                            clas_url = sc[1]
                            clas_name = sc[0]
                            print(
                                '-----------------------STATE CLASS: {}-----------------------'
                                .format(clas_name))

                            pagination = True
                            cur_page = 1
                            while pagination:
                                # '/rankings/football-fall-16/{}/division/fl/asZCeSbCLkCIkW5UXU_cOw/division-8a.htm'
                                url = re.sub(r'/[0-9]{1,3}/', '/{}/', clas_url)
                                soup = request(s, url.format(cur_page),
                                               cookies)
                                if soup:
                                    if done_text in str(soup):
                                        pagination = False
                                    else:
                                        print(
                                            '-----------------------Page {}-----------------------'
                                            .format(cur_page))
                                        cur_page = cur_page + 1

                                        try:
                                            trs = soup.select('table tr')[1:]
                                        except Exception:
                                            trs = []

                                        for tr in trs:
                                            rp = RowParser(tr)
                                            name = rp.get_name()
                                            rating = rp.get_rating()
                                            strength = rp.get_strength()
                                            p_url = rp.get_profle_url()
                                            s_num = rp.get_search_num()

                                            prof_soup = request(
                                                s, p_url, cookies)
                                            if prof_soup:
                                                pp = ProfileParser(
                                                    s, cookies, prof_soup,
                                                    s_num, name, rating,
                                                    strength, p_url)
                                                data = pp.get_data()
                                                write_csv(csv_fname, data)
                                                print('[+] Done >> {}'.format(
                                                    name))
                    else:
                        pagination = True
                        cur_page = 1
                        while pagination:
                            soup = request(
                                s, RANKINGS_BY_DIVISION.format(cur_page, div),
                                cookies)
                            if soup:
                                if done_text in str(soup):
                                    pagination = False
                                else:
                                    print(
                                        '-----------------------Page {}-----------------------'
                                        .format(cur_page))
                                    cur_page = cur_page + 1

                                    try:
                                        trs = soup.select('table tr')[1:]
                                    except Exception:
                                        trs = []

                                    for tr in trs:
                                        rp = RowParser(tr)
                                        name = rp.get_name()
                                        rating = rp.get_rating()
                                        strength = rp.get_strength()
                                        p_url = rp.get_profle_url()
                                        s_num = rp.get_search_num()

                                        prof_soup = request(s, p_url, cookies)
                                        if prof_soup:
                                            pp = ProfileParser(
                                                s, cookies, prof_soup, s_num,
                                                name, rating, strength, p_url)
                                            data = pp.get_data()
                                            write_csv(csv_fname, data)
                                            print(
                                                '[+] Done >> {}'.format(name))
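All three pagination loops in this example repeat the same parse-rows, fetch-profile,
write-CSV body. A hedged sketch of how that body could be factored into a single
helper, reusing the example's own names (RowParser, ProfileParser, request, and
write_csv are assumed to behave exactly as above):

def scrape_page(s, cookies, soup, csv_fname):
    # Shared body of the three pagination loops: parse each table row, fetch
    # the linked profile page, and append the parsed record to the CSV.
    try:
        trs = soup.select('table tr')[1:]
    except Exception:
        trs = []
    for tr in trs:
        rp = RowParser(tr)
        name = rp.get_name()
        p_url = rp.get_profle_url()
        prof_soup = request(s, p_url, cookies)
        if prof_soup:
            pp = ProfileParser(s, cookies, prof_soup, rp.get_search_num(),
                               name, rp.get_rating(), rp.get_strength(), p_url)
            write_csv(csv_fname, pp.get_data())
            print('[+] Done >> {}'.format(name))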