Example #1
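# A POST with a Content-Type other than application/json should be rejected with 400.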
def test_incorrect_content_type(get_database_client, app):
    mock_db = MockFirestore()
    get_database_client.return_value = mock_db

    with app.test_request_context(
            json={'item': 'test'},
            content_type='application/x-www-form-urlencoded',
            headers={'Origin': 'https://inventoryapp-276220.web.app'}):
        res = main.post(flask.request)
        assert res.status_code == 400
Example #2
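# Updating an item that does not exist in Firestore should return 404.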
def test_post_not_existing(get_database_client, app):
    mock_db = MockFirestore()
    get_database_client.return_value = mock_db

    with app.test_request_context(
            json={
                "item": "item1",
                "quantity": 4
            },
            content_type="application/json",
            method='POST',
            headers={'Origin': 'https://inventoryapp-276220.web.app'}):
        res = main.post(flask.request)
        assert res.status_code == 404
Example #3
def recordSubmission(sub, filename, queries):
    import json
    import praw
    import main
    with open(filename, 'a') as fileObj:
        # Record the submission itself if its title matches any query.
        matches = [query for query in queries if query in sub.title]
        if matches:
            json.dump(
                main.post(sub.created, sub.title, 0, 'reddit', 'submission',
                          ' '.join(matches)).dict, fileObj)
            fileObj.write('\n')
        # Record each comment whose body matches any query.
        for comment in sub.comments:
            if not isinstance(comment, praw.models.reddit.comment.Comment):
                continue
            matches = [query for query in queries if query in comment.body]
            if matches:
                json.dump(
                    main.post(comment.created, comment.body, 0, 'reddit', 'comment',
                              ' '.join(matches)).dict, fileObj)
                fileObj.write('\n')
    return
Example #4
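# Updating an existing item should persist the new quantity, return it as JSON, and set the CORS headers.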
def test_post_updates(get_database_client, app):
    mock_db = MockFirestore()
    get_database_client.return_value = mock_db

    mock_db.collection('items').document('item1').set({'quantity': 1})
    expected = {'quantity': 4}
    with app.test_request_context(
            json={
                "item": "item1",
                "quantity": 4
            },
            content_type="application/json",
            method='POST',
            headers={'Origin': 'https://inventoryapp-276220.web.app'}):
        res = main.post(flask.request)
        assert res.status_code == 200
        assert expected == res.get_json()
        doc = mock_db.collection('items').document('item1').get()
        assert doc.to_dict() == expected
        assert 'https://inventoryapp-276220.web.app' == res.headers[
            'Access-Control-Allow-Origin']
        assert 'true' == res.headers['Access-Control-Allow-Credentials']
Example #5
import json
from string import Template

import main

landmarks = []
currentHits = []
landmarkScores = []
partialUpdateApi = 'https://vpc-srsdata-entity-996037dbb77d-yzau6raclfxc3kgbxxvjibwscm.ap-southeast-1.es.amazonaws' \
                   '.com/tvlk_entity_prod_read_alias/_update/{}'
entityInitialScrollApiUrl = 'https://vpc-srsdata-entity-996037dbb77d-yzau6raclfxc3kgbxxvjibwscm.ap-southeast-1.es.amazonaws' \
                            '.com/tvlk_entity_prod_read_alias/_search?scroll=10s'

entityInitialScrollApiBody = '{"size": 500,"query":{"match":{"pC":"LANDMARK"}},"sort":[{"s.ID_ID.p":{' \
                             '"order":"desc"}}],"_source":{"includes":["id","s.ID_ID.p"],"excludes":[]}}'

entityScrollApiUrl = 'https://vpc-srsdata-entity-996037dbb77d-yzau6raclfxc3kgbxxvjibwscm.ap-southeast-1.es.amazonaws' \
                     '.com/tvlk_entity_prod_read_alias/_search/scroll'
entityScrollApiBody = '{"size": 500,"scroll":"10s","scroll_id":"$scroll_id"}'

# Fetch the first page of LANDMARK entities and open a scroll context.
response = json.loads(main.post(entityInitialScrollApiUrl, entityInitialScrollApiBody).content)
currentHits = response['hits']['hits']
landmarks.extend(currentHits)
scroll_id = response["_scroll_id"]

# Keep pulling scroll pages until an empty page comes back.
while len(currentHits) > 0:
    scrollBody = Template(entityScrollApiBody).substitute(scroll_id=scroll_id)
    response = json.loads(main.post(entityScrollApiUrl, scrollBody).content)
    print(response)
    currentHits = response['hits']['hits']
    landmarks.extend(currentHits)
    scroll_id = response["_scroll_id"]
    print(len(landmarks))

for landmark in landmarks:
    landmarkScores.append(landmark['_source']['s']['ID_ID']['p'])
Example #6
import main

main.post()
Example #7
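# Scrape a playlist and then a user profile, calling post() with a category after each scrape.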
from main import post, scrapplaylist, scrapprofile, deletevariables
ExamplePlaylist = 'https://www.youtube.com/playlist?list='
ExampleProfile = 'https://www.youtube.com/user/username/videos'
scrapplaylist(ExamplePlaylist)
post(kategoria='CATEGORY')
deletevariables()
scrapprofile(ExampleProfile)
post(kategoria='CATEGORY')
Example #8
import csv
import json
import sys

import main

entityTypeToIdPrefix = {
    'GEO': 'LOCAL_GEO',
    'EXPERIENCE': 'EXPERIENCE',
    'CULINARY': 'CULINARY_RESTAURANTS',
    'HOTEL': 'ACCOM_HOTELS'
}

partialUpdateApi = 'https://vpc-srsdata-entity-996037dbb77d-yzau6raclfxc3kgbxxvjibwscm.ap-southeast-1.es.amazonaws.com/tvlk_entity_prod_read_alias/_update/{}'
csvFileName = sys.argv[1]
print(csvFileName)
with open(csvFileName) as csv_file:
    csv_reader = csv.reader(csv_file)
    skip = False
    for row in csv_reader:
        # When skip is set, drop the current row and reset the flag.
        if skip:
            skip = False
            continue
        # Ignore rows without a positive score.
        if float(row[5]) <= 0.0:
            continue
        id = entityTypeToIdPrefix[row[1]] + "_" + row[0]
        popKeyValue = [""]
        partialUpdatePayload = json.dumps(
            dict(doc=dict(normProps=dict(pS=float(row[5])))))
        print(id)
        try:
            main.post(partialUpdateApi.format(id), partialUpdatePayload)
        except Exception as e:
            print("failed to partial update on " + id + " " + str(e))