Example #1
    def authorize(self):
        """Authorize with the Drive v3 API and build the service client.

        Loads cached credentials from token.pickle, refreshing them or running
        the OAuth flow again when needed.
        """
        creds = None
        # The file token.pickle stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first
        # time.
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                creds = pickle.load(token)
        else:
            error("token.pickle doesn't exist")
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                # notify("Login again", crashed=True)
                error("Creds expired. Refreshing....")
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    'credentials.json', SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open('token.pickle', 'wb') as token:
                pickle.dump(creds, token)
        info("Authorized with Drive API")
        self.service = build('drive', 'v3', credentials=creds)
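The Drive snippets in Examples #1 and #3 rely on imports and a SCOPES constant defined elsewhere in the project. A minimal sketch of that surrounding module header, assuming the standard google-api-python-client and google-auth-oauthlib packages (the full-access scope shown here is an assumption; the project may use a narrower one, and info/error come from the project's own Logger helpers):

import os
import pickle

from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload  # used by upload() in Example #3

# Assumed scope -- the project may restrict this, e.g. to drive.file.
SCOPES = ['https://www.googleapis.com/auth/drive']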
Example #2
def update_song_link(song_key, song_name, old=False):
    link = search_by_name(song_name)
    # search_by_name returns -1 when no downloadable link is found
    if link != -1:
        query = 'update song_list set link_found = true, link = \'{}\' where id = {}'.format(
            link, song_key)
        execute_query(query)
        info("Link for song {} found".format(song_name))
        return link
    else:
        if not old:
            notify("Link for song {} not found".format(song_name),
                   customText=True)
        info("Link for song {} not found".format(song_name))
    return None
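Since the UPDATE above interpolates the link straight into the SQL string, a link containing a single quote would break the query. If execute_query is backed by a DB-API connection, a parameterized variant sidesteps that; a sketch assuming a psycopg2-style cursor (the cursor argument and placeholder style are assumptions, not part of the original helper):

def update_song_link_safe(cursor, song_key, link):
    # Hypothetical variant: the driver handles quoting/escaping of the link.
    cursor.execute(
        'update song_list set link_found = true, link = %s where id = %s',
        (link, song_key))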
Example #3
    def upload(self, key, name, file_path):
        try:
            self.authorize()
            file_metadata = {'name': name, 'key': key}
            media = MediaFileUpload(file_path, mimetype='audio/mpeg')
            result_file = self.service.files().create(body=file_metadata,
                                                      media_body=media,
                                                      fields='id').execute()
            info("Song {} uploaded to drive".format(name))
            self.delete_duplicate()
            return bool(result_file)
        except Exception:
            import traceback
            traceback.print_exc()
            stacktrace = traceback.format_exc()
            error(stacktrace)
            return False
Example #4
def notify(name, crashed=False, customText=False):
    from pyfcm import FCMNotification

    push_service = FCMNotification(api_key="api_key")
    registration_id = "device_registration_token"

    message_title = "Song downloaded!" if not crashed else "Program crashed"
    if customText:
        message_title = "My Shazam Songs"
        message_body = name
    else:
        message_body = "Your song \"{}\" is ready in Google Drive ([email protected])".format(
            name) if not crashed else name

    # The push is only sent for the non-crash case, so the crash-notification
    # log message below is currently unreachable.
    if not crashed:
        result = push_service.notify_single_device(
            registration_id=registration_id,
            message_title=message_title,
            message_body=message_body)
        if result['success'] == 1:
            info("Notification sent for song {}".format(name) if not crashed
                 else "Crash notification sent with msg = {}".format(name))
Example #5
    def delete_duplicate(self):
        info("Deleting duplicate files in drive")
        try:
            self.authorize()
            pageToken = None
            songs = []
            dups = {}
            while True:
                results = self.service.files().list(
                    pageToken=pageToken,
                    fields="nextPageToken, files(id,name,modifiedTime)",
                    orderBy="modifiedTime asc").execute()
                items = results.get('files', [])
                pageToken = results.get('nextPageToken', None)
                songs += items
                if pageToken is None:
                    break

            found_dups = False
            for song in songs:
                if song['name'] in dups:
                    found_dups = True
                    dups[song["name"]].append({
                        'name':
                        song['name'],
                        'id':
                        song['id'],
                        'modifiedTime':
                        song['modifiedTime']
                    })
                else:
                    dups[song["name"]] = [{
                        'name': song['name'],
                        'id': song['id'],
                        'modifiedTime': song['modifiedTime']
                    }]
            if not found_dups:
                info("No duplicate files found")
            for dup in dups:
                if len(dups[dup]) > 1:
                    others = sorted(dups[dup],
                                    key=lambda x: x['modifiedTime'])[1:]

                    for inst in others:
                        info(("Deleting duplicate song: {}".format(
                            inst['name'])))
                        self.service.files().delete(
                            fileId=inst['id']).execute()
        except Exception:
            import traceback
            traceback.print_exc()
            stacktrace = traceback.format_exc()
            error(stacktrace)
Example #6
def download_and_upload(song_key, song_name, link):
    file_path = download_file(song_name, link)
    if filesize(file_path) > 5 * 1024:
        info("Uploading song {} to drive".format(song_name))
        GDrive().upload(song_key, song_name, file_path)
    else:
        info(
            "File for song {} was less than 5 KB, therefore not uploading song"
            .format(song_name))
    delete_file(file_path)
    info("Song {} deleted from temp storage".format(song_name))
    notify(song_name)
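filesize and delete_file are small helpers from the same project (delete_file appears in Example #10). filesize is not shown on this page; given the 5 * 1024 byte threshold above, it presumably just returns the file size in bytes, along the lines of:

def filesize(file_path):
    # Assumed implementation: size of the downloaded file in bytes.
    import os
    return os.path.getsize(file_path)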
Example #7
    if os.path.exists('{}.index'.format(weights_path)):
        debug('weights file exists at {}'.format(weights_path))
        try:
            training_model.load_weights(weights_path)
            training_model.compile(
                optimizer=tf.keras.optimizers.Adam(),
                loss=tf.keras.losses.CategoricalCrossentropy(),
                metrics=['accuracy'])
        except Exception as ex:
            error('error during weights loading: {}'.format(ex))
            exit()
    else:
        debug(
            'weights file does not exist => this network needs to be trained')
        training_model.compile(optimizer=tf.keras.optimizers.Adam(),
                               loss=tf.keras.losses.CategoricalCrossentropy(),
                               metrics=['accuracy'])
        info('Start training:')
        training_model.fit(training_ds,
                           validation_data=validation_ds,
                           verbose=1,
                           batch_size=5,
                           epochs=20,
                           callbacks=[TB_callback, checkpoint_callback])
        info('Model summary:')
        training_model.summary()
    info('Performance evaluation:')
    results = training_model.evaluate(test_ds, verbose=1)
    info('Results: \n\tLoss => {}\n\t Accuracy => {:5.2f}%'.format(
        results[0], results[1] * 100))
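The TB_callback and checkpoint_callback objects passed to fit() are defined elsewhere. A minimal sketch of what they could look like using the standard tf.keras callbacks (the log directory is an illustrative placeholder; saving weights-only matches the weights_path/.index check above):

import tensorflow as tf

# Assumed callback setup; paths are placeholders.
TB_callback = tf.keras.callbacks.TensorBoard(log_dir='./logs')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=weights_path,        # e.g. './weights_result'
    save_weights_only=True,       # produces the <weights_path>.index file checked above
    save_best_only=True)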
Example #8
def test_logging(args):
    msg = args[0]
    info(msg)
Example #9
from Logger import warn, debug, info
import Logger
import multiprocessing

Logger.basicConfig(level=Logger.DEBUG, format="%(asctime)s %(levelname)s %(message)s", filename="logger_test.log")


def test_logging(args):
    msg = args[0]
    info(msg)


if __name__ == "__main__":

    #
    #   call from main process to make sure works
    #
    info("starting")

    #
    #   call from child processes in pool
    #
    pool = multiprocessing.Pool(processes=4)  # start 4 worker processes
    function_parameters = list()
    for a in range(200):
        function_parameters.append(("message #%3d" % a,))
    pool.map(test_logging, function_parameters)

    print(Logger.getCounts())
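Logger here is the project's own wrapper rather than the standard logging module, and it is not shown on this page. A rough sketch of the interface the test assumes, i.e. basicConfig/DEBUG passed through to logging plus level functions that keep call counts for getCounts() (the counting details, and the reliance on fork()-started workers for shared counters, are assumptions):

# Logger.py -- hypothetical wrapper matching the calls in the test above.
import logging
from multiprocessing import Value

basicConfig = logging.basicConfig
DEBUG = logging.DEBUG

# Shared counters; with fork()-started workers the children update the same values.
_counts = {name: Value('i', 0) for name in ('debug', 'info', 'warn', 'error')}


def _log(name, msg):
    getattr(logging, 'warning' if name == 'warn' else name)(msg)
    with _counts[name].get_lock():
        _counts[name].value += 1


def debug(msg):
    _log('debug', msg)


def info(msg):
    _log('info', msg)


def warn(msg):
    _log('warn', msg)


def error(msg):
    _log('error', msg)


def getCounts():
    return {name: counter.value for name, counter in _counts.items()}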
Example #10
def delete_file(file_path):
    import os
    if os.path.exists(file_path):
        os.remove(file_path)
    else:
        info("The file '{}' does not exist".format(file_path))
Example #11
def get_song_list_from_shazam(upto_timestamp=None):
    url = "https://www.shazam.com/discovery/v4/en-US/US/web/-/tag/AC53CA17-11C1-4F7D-91FC-93FCD57B878B"
    i = 0
    song_names = []
    song_keys = set()
    token = ""
    random_uuid = str(uuid.uuid4())

    # found = False  # change this to false when blocked on some song

    while True:
        if i == 0:
            # tagId is random uuid, backup=9c26cc1d-a28f-470f-90b0-161a8d04e886

            token = '{"accountId": {"s": "accountid"},' \
                    '"tagId": {"s": "' + random_uuid + '"},' '"timestamp": {"n": "' \
                    + str(int(str(time.time()).split('.')[0] + '000')) + '"}}'
            # print(token)
            token = str(token).encode("utf-8")
            token = str(base64.b64encode(token))[2:-1]

        querystring = {"limit": "100", "token": token}

        headers = {
            'authority':
            "www.shazam.com",
            'pragma':
            "no-cache",
            'cache-control':
            "no-cache,no-cache",
            'user-agent':
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/78.0.3904.97 Safari/537.36",
            'dnt':
            "1",
            'accept':
            "*/*",
            'sec-fetch-site':
            "same-origin",
            'sec-fetch-mode':
            "cors",
            'referer':
            "https://www.shazam.com/myshazam",
            'accept-encoding':
            "gzip, deflate, br",
            'accept-language':
            "en-US,en;q=0.9,hi;q=0.8,mr;q=0.7",
            'cookie':
            "fbm_210827375150=base_domain=.shazam.com; geoip_country=US; geoip_lat=32.925; "
            "geoip_long=-96.892; "
            "codever"
            "=a3b244b8d73c4f5e426954d6822ca00e1c1bdcb949071b552b8165dccaa1d86e4e5dd7bbc50ba313bd270b5208be195a167094d3bfc988a8102dc288dda8f2e8550e629473614471cc148a5d307fb5d0cc548e0b0d3369f5fabef9d71c65dd6e0a93d41232981ee6409c03a760ef8fe0a4c57f308300f1db3defa3a8351b38754737ac20caf41f67adeb349ffb7eb66504f4fb083e4033cdfe92",
            'Host':
            "www.shazam.com",
            'Connection':
            "keep-alive"
        }

        response = requests.get(url, headers=headers, params=querystring)
        # print(response)
        if not response.text:
            # print("returning")
            return song_names
        # print(response.text)
        j = json.loads(response.text)
        tags = j["tags"]
        breakout = False
        for x in tags:
            # stuck_song_key = -1  # get id from mac intellij db
            # if not found and x['track']['key'] == str(stuck_song_key):
            #     found = True

            # if not found:
            #     continue

            if x['track']['key'] not in song_keys:

                key = x['track']['key']
                name = x["track"]["heading"]["title"] + " - " + x["track"][
                    "heading"]["subtitle"]
                ts = x['timestamp']

                if upto_timestamp and str(ts) <= str(upto_timestamp):
                    # print("Breaking from the loop, not paging")
                    breakout = True
                    break
                already_exists = check_already_exists(key)
                if already_exists:
                    info("Song {} already exists, updating timestamp".format(
                        name))
                    notify("Song {} already downloaded!".format(name),
                           customText=True)
                    update_timestamp(key, ts)
                    if not already_exists[3]:
                        info(
                            "Song {} does not have link. Trying to find downloadable link..."
                            .format(name))
                        update_song_link(key, name)
                    continue

                song_keys.add(key)
                song_names.append(name)

                save_to_db(song_key=key, song_name=name, song_timestamp=ts)
                info("Added {} to DB".format(name))
        i = 1
        if 'token' not in j:
            break
        if breakout:
            break
        token = j['token']
    return song_names
Example #12
    # change the arg to None when continuing
    # return get_song_list_from_shazam(None)
    return get_song_list_from_shazam(upto_timestamp=res[0] if res else None)


check_old_songs_count = 0
delete_log_file_count = 0

check_old_songs_interval = 30
delete_log_file_interval = 50

poll_interval = 10
while True:
    try:
        info("Polling... ")
        added = check_new()
        info("{} songs added".format(len(added)))
        check_old_songs_count += 1
        delete_log_file_count += 1

        if check_old_songs_count == check_old_songs_interval:
            info("Checking old songs for new links...")
            check_old_songs()
            check_old_songs_count = 0

        if delete_log_file_count == delete_log_file_interval:
            info("Resetting log file...")
            time.sleep(0.5)
            delete_log_file_count = 0
    except Exception:
        import traceback
        traceback.print_exc()
        stacktrace = traceback.format_exc()
        error(stacktrace)
    # pause before the next poll
    time.sleep(poll_interval)
Example #13
if __name__ == '__main__':
    if len(argv) < 2:
        help(argv[0])

    weights_path = argv[2] if len(argv) > 2 else './weights_result'

    classificator = MalwareClassificator(6, 200, family_labels)

    try:
        if len(argv) >= 3:
            classificator.load_weights(argv[2])
        else:
            classificator.load_weights()
    except IOError as e:
        error(str(e))
        exit()
    
    if '-q' not in argv:
        debug('model correctly loaded')
        debug('loading image {}'.format(argv[1]))
    
    try:
        prediction = classificator.predict(argv[1])
    except Exception as e:
        error(str(e))
        exit()
    
    classificator.model.model().summary()

    info('prediction for the image: {}'.format(classificator.get_label_from_prediction(prediction)))