Example #1
0
    def post(self):
        """Create a new project from the JSON request body.

        Expects keys: project_name, start_date, end_date, priority, user_id.
        Returns 400 when input is missing, the project name is taken, or the
        user does not exist; 201 with the serialized project on success.
        """
        json_data = request.get_json(force=True)

        # Reject the request unless every required field is present.
        required = ('project_name', 'start_date', 'end_date', 'priority', 'user_id')
        if not json_data or any(key not in json_data for key in required):
            return {"error": "Input data missing"}, 400

        # Project names must be unique.
        project = Project.query.filter_by(project_name=json_data['project_name']).first()
        if project:
            return {'message': 'Project already exists'}, 400

        # The referenced owner must already exist.
        user = User.query.filter_by(id=json_data['user_id']).first()
        if not user:
            # Fixed typo: was "User doest not exist".
            return {'message': 'User does not exist'}, 400

        project = Project(
            project_name=json_data['project_name'],
            start_date=HelperUtil.stringToDate(self, json_data['start_date']),
            end_date=HelperUtil.stringToDate(self, json_data['end_date']),
            priority=json_data['priority'],
            status=constant.ACTIVE,
            user_id=json_data['user_id']
        )

        db.session.add(project)
        db.session.commit()

        result = project_schema.dump(project)

        return {"message": "Saved successfully!", "data": result}, 201
Example #2
0
def get_projects():
    """Scrape the kwork.ru projects page and return a list of Project items.

    Each card is parsed best-effort: a missing field falls back to a
    sentinel string (e.g. "Error title") instead of aborting the scrape.
    """
    url = "https://kwork.ru/projects"
    headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) \
                                                Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)'}
    projects = []
    response = requests.get(url, headers=headers)
    log.info( "Status code: %d. Length: %d " % ( response.status_code, len( response.text ) ) )
    soup = BeautifulSoup(response.text, "html.parser")
    # Hoisted out of the loop: the price pattern is applied once per card.
    price_re = re.compile(r"\d{3}|\d{1,2}\s\d{3}")
    # Fix: iterate the result list directly -- the old `items[:]` made a
    # pointless copy (nothing mutates the list while looping over it).
    for item in soup.select("div[class*=js-card]"):
        title = item.select_one("div[class*=header-title]")
        title = title.text if title else "Error title"
        price = item.select_one("div.wants-card__right")
        price = " - ".join(price_re.findall(str(price)))
        description = item.select_one("div.breakwords.hidden")
        description = description.text.replace("Скрыть", "").strip() if description else "Description error"
        if description == "Description error":
            # Fallback selector for the alternate card layout.
            description = item.select_one("div.breakwords.first-letter ~ div")
            description = description.text if description else "Description error2"
        proposal_count = item.find(lambda tag: tag.name == "span" and "Предложений:" in tag.text)
        proposal_count = re.findall(r"\d+", proposal_count.text)[0] if proposal_count else "Prop error"
        author = item.select_one("a.v-align-t")
        author = author.text if author else "Author error"
        link = item.select_one("div.wants-card__header-title a")
        link = link['href'] if link else "Link error"
        timer = item.find(lambda tag: tag.name == "span" and "Осталось" in tag.text)
        timer = timer.text if timer else "timer error"
        projects.append(Project(title, description, author, proposal_count,
                                price, timer, link))
    return projects
Example #3
0
def infer(args):
    """Run the trained model over the test set and write predictions to CSV.

    args.test    -- path to the test data (passed to Data)
    args.testcsv -- output path, one prediction per line
    """
    data = Data(str(args.test))
    # Fix: the original did Project(*args, **kwargs) -- `kwargs` was never
    # defined (NameError) and an argparse Namespace cannot be splatted.
    # Construct the model the same way training does (see main()).
    model = Project()
    model.load_state_dict(torch.load(PATH))
    model.eval()
    data_loader = torch.utils.data.DataLoader(
        data, batch_size=len(data), num_workers=24
    )
    # Fix: `range(data_loader)` raised TypeError -- iterate the loader and
    # feed each batch to the model. Autograd is disabled for inference.
    results = []
    with torch.no_grad():
        for batch in data_loader:
            results.extend(model(batch).tolist())
    # Fix: close the file via a context manager, and stringify predictions
    # before joining (tolist() may yield numbers, not strings).
    with open(args.testcsv, "w") as out:
        out.write("\n".join(str(r) for r in results))
    print("Done....")
Example #4
0
def main():
    """Parse CLI args, split the dataset 80/20, and train the Project model."""
    parser = argparse.ArgumentParser(description="Process some integers.")
    parser.add_argument("--file")
    # NOTE(review): "--epouch" looks like a misspelling of "epoch", but the
    # flag name is kept so existing invocations keep working.
    parser.add_argument("--epouch", type=int)
    parser.add_argument("--batch_size", type=int)
    # Fix: --num_workers was parsed but never used (24 was hard-coded in
    # both loaders below); 24 stays as the default for compatibility.
    parser.add_argument("--num_workers", type=int, default=24)
    args = parser.parse_args()

    data = Data(args.file)
    validation_split = 0.2
    shuffle_dataset = True
    random_seed = 42
    dataset_size = len(data)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    if shuffle_dataset:
        # Fixed seed keeps the train/validation split reproducible.
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)

    train_loader = torch.utils.data.DataLoader(data,
                                               batch_size=args.batch_size,
                                               sampler=train_sampler,
                                               num_workers=args.num_workers)
    # The validation set is evaluated as one batch.
    validation_loader = torch.utils.data.DataLoader(
        data,
        batch_size=len(valid_sampler),
        sampler=valid_sampler,
        num_workers=args.num_workers)

    net = Project()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    train(net, criterion, args.epouch, optimizer, train_loader,
          validation_loader)