Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('inputdir', metavar='[input_dir]',
                        help='input directory with E01/info files')
    args = parser.parse_args()

    if not os.path.exists(args.inputdir):
        sys.exit('Quitting: Input directory does not exist.')

    # Check for space; quit if not available
    input_size = 0
    for dirpath, dirnames, filenames in os.walk(args.inputdir):
        for f in filenames:
            fp = os.path.join(dirpath, f)
            input_size += os.path.getsize(fp)
    print("The size of the input directory is: {0}".format(size(input_size)))
    print("To continue, you need {0} of free space.".format(size(input_size*4)))
    proceed = input("Do you want to continue? y/N ")
    if not proceed.lower().startswith('y'):
        sys.exit("Quitting.")

    # Run all the individual triage scripts
    organizeDirs(args.inputdir)
    guymagerLogMD(args.inputdir)
    filesystemID(args.inputdir)
    level1Data(args.inputdir)
    mergeOutputs(args.inputdir)
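
The walk-and-sum above leans on hurry.filesize's size() for the human-readable
figures. A minimal, self-contained sketch of what Example #1 assumes (only the
hurry.filesize package is required; the 4x free-space requirement is the
example's own heuristic):

import os
from hurry.filesize import size

def dir_size(path):
    """Sum the sizes of all files below path, exactly as os.walk visits them."""
    total = 0
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            total += os.path.getsize(os.path.join(dirpath, name))
    return total

if __name__ == '__main__':
    total = dir_size('.')
    print("Input size: {0} bytes ({1})".format(total, size(total)))  # e.g. '4M'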
Example #2

    def test_image_lifecycle_from_local(self):
        test_name = 'test_image_lifecycle_from_local'
        start_time = time.time()
        data = os.path.join(os.getcwd(), 'files/cirros-0.3.0-i386-disk.vmdk')
        image = self.glance.images.create(
            name='image_test1',
            container_format='bare',
            is_public=True,
            disk_format='vmdk',
            data=open(data, 'rb'),
            properties={'vmware-disktype': 'sparse',
                        'vmware-adaptertype': 'ide'})
        self.wait_for_image_status(image, 'active')
        print (
            test_name, '%s uploaded in %s sec' %
            (str(size(image.size)), str(round(time.time() - start_time))))

        start_time = time.time()
        self.glance.images.data(image)
        print (
            test_name, '%s got in %s sec' %
            (str(size(image.size)), str(round(time.time() - start_time))))

        start_time = time.time()
        self.glance.images.delete(image)
        print (
            test_name, '%s deleted in %s sec' %
            (str(size(image.size)), str(round(time.time() - start_time))))
Example #3
def render_summary(stats):
    """
    Render summary of an event stream run.
    :param stats: Dictionary('clock':list()<float>, 'rss':list()<int>)
    :return: Void.
    """
    print('\nSummary profile from stream execution:')
    print('Samples: %i' % len(stats['clock']))
    if -1 in stats['clock']:
        print('(ERRORS DETECTED: Removing timing samples from aborted invocations.)')
        stats['clock'] = [x for x in stats['clock'] if x > 0]
        print('New sample size: %i' % len(stats['clock']))
    print(stats['clock'])
    if stats['clock']:
        # Guard before indexing: an all-error run leaves stats['clock'] empty.
        median = sorted(stats['clock'])[math.trunc(len(stats['clock']) / 2)]
        print('Clock time:\n'
              '\tMin: %ims, Max: %ims, Median: %ims, Median Billing Bucket: %ims, Rounded Standard Deviation: %sms' % (
                  min(stats['clock']),
                  max(stats['clock']),
                  median,
                  billing_bucket(median),
                  math.trunc(math.ceil(numpy.std(stats['clock'], ddof=1)))
              ))
    else:
        print("No valid timing samples!")
    print('Peak resident set size (memory):\n'
          '\tMin: %s, Max: %s' % (
              size(min(stats['rss'])),
              size(max(stats['rss']))
          ))
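
billing_bucket() is defined elsewhere in that project. A plausible stand-in,
assuming 100 ms billing granularity (an assumption, not the project's
confirmed constant), rounds the median up to the next bucket boundary:

import math

def billing_bucket(ms, bucket_ms=100):
    # Hypothetical helper: round a duration in ms up to the next bucket edge.
    return int(math.ceil(ms / float(bucket_ms)) * bucket_ms)

print(billing_bucket(130))  # 200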
Example #4

    def test_image_lifecycle_copy_from(self):
        test_name = 'test_image_lifecycle_copy_from'
        start_time = time.time()
        image = self.glance.images.create(
            name='image_test1',
            container_format='bare',
            is_public=True,
            disk_format='vmdk',
            copy_from=self.http_image,
            properties={'vmware-disktype': 'sparse',
                        'vmware-adaptertype': 'ide'})
        self.wait_for_image_status(image, 'active')
        print (
            test_name, '%s uploaded in %s sec' %
            (str(size(image.size)), str(round(time.time() - start_time))))

        start_time = time.time()
        self.glance.images.data(image)
        print (
            test_name, '%s got in %s sec' %
            (str(size(image.size)), str(round(time.time() - start_time))))

        start_time = time.time()
        self.glance.images.delete(image)
        print (
            test_name, '%s deleted in %s sec' %
            (str(size(image.size)), str(round(time.time() - start_time))))
Example #5

def main():
    # logentries config
    log = logging.getLogger('logentries')
    log.setLevel(logging.INFO)
    handler = LogentriesHandler(LOGENTRIES_TOKEN)
    log.addHandler(handler)

    # Make tmp dir if needed...
    if not os.path.exists(TMP_DIR):
        os.makedirs(TMP_DIR)

    # Are we prepending hostname to filename?
    hostname = (socket.gethostname() + '-') if OPTION_USE_HOST else ''

    MYSQL_TMP_FILE = re.sub(r'[\\/:*?"<>| ]', '-', hostname + 'backup' + get_timestamp()) + '.sql'

    # Got final filename, continue on...
    log.info("Connecting to Dropbox...")
    connect_to_dropbox()

    log.info("Connected to Dropbox as " + dropbox_info['display_name'])

    log.info("Creating MySQL backup, please wait...")
    do_mysql_backup(MYSQL_TMP_FILE)

    log.info("Backup done. File is " + size(os.path.getsize(TMP_DIR + MYSQL_TMP_FILE)))

    if OPTION_COMPRESS:
        log.info("Compression enabled - compressing file...")

        compression_level = 9 # 1-9
        srcFile = TMP_DIR + MYSQL_TMP_FILE
        dstFile = srcFile + '.zip'
        pyminizip.compress(srcFile, dstFile, COMPRESS_PASSWORD, compression_level)

        # Delete uncompressed TMP_FILE, set to .zip
        os.unlink(srcFile)
        MYSQL_TMP_FILE = MYSQL_TMP_FILE + '.zip'

        # Tell the user how big the compressed file is:
        log.info("File compressed. New filesize: " + size(os.path.getsize(TMP_DIR + MYSQL_TMP_FILE)))


    log.info("Uploading backup to Dropbox...")
    tmp_file = open(TMP_DIR + MYSQL_TMP_FILE, 'rb')  # binary mode: the payload may be a .zip

    result = dropbox_client.put_file(DROPBOX_FOLDER + MYSQL_TMP_FILE, tmp_file, True)
    # TODO: Check for dropbox.rest.ErrorResponse

    log.info("File uploaded as '" + result['path'] + "', size: " + result['size'])

    log.info("Cleaning up...")
    os.unlink(TMP_DIR + MYSQL_TMP_FILE)

    log.info("Backup completed")

    # need some time to ensure logentries
    time.sleep(10)
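
The re.sub call sanitizes the backup name by replacing characters that are
reserved in filenames on common platforms. A quick check of what it produces,
using the raw-string form of the pattern (which sidesteps Python 3's
invalid-escape warnings) and a hypothetical hostname and timestamp:

import re

# Replace path separators and other reserved filename characters with '-'.
stamp = 'backup2024/01/02 03:04'  # stand-in for get_timestamp() output
print(re.sub(r'[\\/:*?"<>| ]', '-', 'host1-' + stamp) + '.sql')
# host1-backup2024-01-02-03-04.sql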
Example #6

def main():
    # Make tmp dir if needed...
    if not os.path.exists(TMP_DIR):
        os.makedirs(TMP_DIR)

    # Are we prepending hostname to filename?
    hostname = (socket.gethostname() + '-') if OPTION_USE_HOST else ''

    MYSQL_TMP_FILE = re.sub(r'[\\/:*?"<>| ]', '-', hostname + 'backup-' + get_timestamp()) + '.sql'

    # Got final filename, continue on...
    print "Connecting to Dropbox..."
    connect_to_dropbox()

    print "Connected to Dropbox as " + dropbox_info['display_name']

    print "Creating MySQL backup, please wait..."
    do_mysql_backup(MYSQL_TMP_FILE)

    print "Backup done. File is " + size(os.path.getsize(TMP_DIR + MYSQL_TMP_FILE))

    if OPTION_GZIP:
        print "GZip enabled - compressing file..."

        # Write uncompressed file to gzip file:

        # Rant: Is chdir() really the only good way to get rid of dir structure in gz
        # files? GzipFile sounds like it would work, but....
        os.chdir(TMP_DIR)

        sql_file = open(TMP_DIR + MYSQL_TMP_FILE, 'rb')
        gz_file  = gzip.open(MYSQL_TMP_FILE + '.gz', 'wb')

        gz_file.writelines(sql_file)

        sql_file.close()
        gz_file.close()

        # Delete uncompressed TMP_FILE, set to .gz
        os.unlink(TMP_DIR + MYSQL_TMP_FILE)
        MYSQL_TMP_FILE = MYSQL_TMP_FILE + '.gz'

        # Tell the user how big the compressed file is:
        print "File compressed. New filesize: " + size(os.path.getsize(TMP_DIR + MYSQL_TMP_FILE))

    
    tmp_size = os.path.getsize(TMP_DIR + MYSQL_TMP_FILE)
    tmp_file = open(TMP_DIR + MYSQL_TMP_FILE, 'rb')

    print "Uploading backup to Dropbox..."
    uploader = dropbox_client.get_chunked_uploader(tmp_file, tmp_size)

    while uploader.offset < tmp_size:
        try:
            upload = uploader.upload_chunked(1024 * 1024)
        except rest.ErrorResponse, e:
            # ErrorResponse carries HTTP status/reason, not errno/strerror.
            print "Error: %s" % (e,)
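
The retry loop above never commits the upload: with the legacy Dropbox v1 SDK
the file only appears once the uploader's finish() call is made. A sketch of
the missing tail, assuming the same v1 client as the snippet (that API has
long since been retired):

    # Commit the chunked upload once every byte has been sent (Dropbox v1 SDK).
    uploader.finish(DROPBOX_FOLDER + MYSQL_TMP_FILE)
    tmp_file.close()
    print "Backup uploaded as " + DROPBOX_FOLDER + MYSQL_TMP_FILE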
Example #7
 def while_waiting(self):
     if self.parentApp.fastd_data:
         self.uptime.value = str(datetime.timedelta(milliseconds=int(self.parentApp.fastd_data["uptime"]))) 
         self.peers.value = str(len(self.parentApp.fastd_data["peers"]))
         self.rxpkts.value = str(self.parentApp.fastd_data["statistics"]["rx"]["packets"])
         self.rxbytes.value = str(size(self.parentApp.fastd_data["statistics"]["rx"]["bytes"], system=verbose))
         self.rxropkts.value = str(self.parentApp.fastd_data["statistics"]["rx_reordered"]["packets"])
         self.rxrobytes.value = str(size(self.parentApp.fastd_data["statistics"]["rx_reordered"]["bytes"], system=verbose))
         self.txpkts.value = str(self.parentApp.fastd_data["statistics"]["tx"]["packets"])
         self.txbytes.value = str(size(self.parentApp.fastd_data["statistics"]["tx"]["bytes"], system=verbose))
         self.txdpdpkts.value = str(self.parentApp.fastd_data["statistics"]["tx_dropped"]["packets"])
         self.txdpdbytes.value = str(size(self.parentApp.fastd_data["statistics"]["tx_dropped"]["bytes"], system=verbose))
         self.txerrpkts.value = str(self.parentApp.fastd_data["statistics"]["tx_error"]["packets"])
         self.txerrbytes.value = str(size(self.parentApp.fastd_data["statistics"]["tx_error"]["bytes"], system=verbose))
         
                     
         rows = []
         peer_counter = 1
         connected_peers = 0
         for peer in self.parentApp.fastd_data["peers"]:
             peer_obj = self.parentApp.fastd_data["peers"][peer]
             row = []
             if peer_obj["name"]:
                 name = """{0}. {1}""".format(str(peer_counter), str(peer_obj["name"]))
                 row.append(name)
             else:
                 name = """{0}. {1}""".format(str(peer_counter), 'No name set')
                 row.append(name)
             row.append(str(peer_obj["address"]))
             if peer_obj["connection"]:
                 connected_peers +=1
                 if peer_obj["connection"]["mac_addresses"]:
                     row.append(str(peer_obj["connection"]["mac_addresses"][0]))
                 else:
                     row.append('No MAC-Address found')
             else:
                 row.append('No connection info')
             row.append(str(peer))
             rows.append(row)
             peer_counter += 1
         self.clientsbox.values = rows
         self.clientsbox.fastd_data = self.parentApp.fastd_data
         self.conn_peers.value = str(connected_peers)
     self.uptime.display()
     self.peers.display()
     self.conn_peers.display()
     self.rxpkts.display()
     self.rxbytes.display()
     self.rxropkts.display()
     self.rxrobytes.display()
     self.txpkts.display()
     self.txbytes.display()
     self.txdpdpkts.display()
     self.txdpdbytes.display()
     self.txerrpkts.display()
     self.txerrbytes.display()
     self.clientsbox.display()
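
This screen passes system=verbose so the byte counters render as full words.
For comparison, the four unit systems hurry.filesize ships with:

from hurry.filesize import size, si, alternative, verbose

n = 1024 * 1024
print(size(n))                      # '1M'          (traditional, powers of 1024)
print(size(n, system=si))           # '1M'          (SI, powers of 1000)
print(size(n, system=alternative))  # '1 MB'
print(size(n, system=verbose))      # '1 megabyte'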
Example #8
    def create_archive(self, archive_dir, container, backup_dir, arc_block):
        """ Create an encrypted container.  The resulting containersize is only
        accurate to the nearest megabyte.  Return 1 if any problems or 0 for
        success. """
        if self.args.create:
            status_item('!! CREATE CONTAINER')
            status_result('CONFIRMED', 4)
        else:
            status_item('!! CREATE CONTAINER? (y/n)')

        if self.args.create or raw_input() == 'y':
            archive_size = int(float(arc_block) /
                               float(self.config.provision_capacity_percent)
                               * 100)
            status_item('Archive Block Size')
            status_result(str(arc_block) + ' (' + size(arc_block) + ')')
            status_item('Provision Capacity')
            status_result(str(archive_size) + ' (' + size(archive_size) + ')')
            status_item('Required Container Size')
            # Round to the nearest megabyte to speed up dd blocksize below.
            container_size_needed_m = \
                int(math.ceil(self.calc_archive_container(archive_size)
                    / 1048576))

            status_result(str(container_size_needed_m * 1048576) + ' (' +
                          str(container_size_needed_m) + 'M)')
            status_item('Generating Container')
            status_result('IN PROGRESS', 2)
            try:
                if self.args.verbose:
                    status_item('Command')
                    status_result('dd if=/dev/zero bs=1048576 status=none ' +
                                  'count=' + str(container_size_needed_m) +
                                  ' ' + 'of=' + container)

                print

                # TODO: need to convert this into a scheme which is not
                # dependent on Unix pipes
                subprocess.check_call('dd if=/dev/zero bs=1048576 ' +
                                      'status=none ' +
                                      'count=' + str(container_size_needed_m) +
                                      ' | pv -s ' +
                                      str(container_size_needed_m * 1048576) +
                                      ' | ' + 'dd status=none ' +
                                      'of=' + container, shell=True)
                print
            except subprocess.CalledProcessError, e:
                status_item('Generation Result')
                status_result('FAILED: ' + str(e), 3)
                return 1
            except Exception, e:
                status_item('Generation Result')
                print e
                if re.match('.*No such file or directory', str(e)):
                    status_result('COMMAND NOT FOUND', 3)
                    return 1
Example #9

def main():
    # Make tmp dir if needed...
    if not os.path.exists(TMP_DIR):
        os.makedirs(TMP_DIR)

    # Are we prepending hostname to filename?
    hostname = (socket.gethostname() + '-') if OPTION_USE_HOST else ''

    MYSQL_TMP_FILE = re.sub(r'[\\/:*?"<>| ]', '-', hostname + 'backup-' + get_timestamp()) + '.sql'

    # Got final filename, continue on...
    print "Connecting to Dropbox..."
    connect_to_dropbox()

    print "Connected to Dropbox as " + dropbox_info['display_name']

    print "Creating MySQL backup, please wait..."
    do_mysql_backup(MYSQL_TMP_FILE)

    print "Backup done. File is " + size(os.path.getsize(TMP_DIR + MYSQL_TMP_FILE))

    if OPTION_GZIP:
        print "GZip enabled - compressing file..."

        # Write uncompressed file to gzip file:

        # Rant: Is chdir() really the only good way to get rid of dir structure in gz
        # files? GzipFile sounds like it would work, but....
        os.chdir(TMP_DIR)

        sql_file = open(TMP_DIR + MYSQL_TMP_FILE, 'rb')
        gz_file  = gzip.open(MYSQL_TMP_FILE + '.gz', 'wb')

        gz_file.writelines(sql_file)

        sql_file.close()
        gz_file.close()

        # Delete uncompressed TMP_FILE, set to .gz
        os.unlink(TMP_DIR + MYSQL_TMP_FILE)
        MYSQL_TMP_FILE = MYSQL_TMP_FILE + '.gz'

        # Tell the user how big the compressed file is:
        print "File compressed. New filesize: " + size(os.path.getsize(TMP_DIR + MYSQL_TMP_FILE))


    print "Uploading backup to Dropbox..."
    tmp_file = open(TMP_DIR + MYSQL_TMP_FILE, 'rb')  # binary mode: the payload may be a .gz

    result = dropbox_client.put_file(DROPBOX_FOLDER + MYSQL_TMP_FILE, tmp_file)
    # TODO: Check for dropbox.rest.ErrorResponse

    print "File uploaded as '" + result['path'] + "', size: " + result['size']

    print "Cleaning up..."
    os.unlink(TMP_DIR + MYSQL_TMP_FILE)
Example #10
	def about(self):
		db_data = self.db_client.account_info()
		gd_data = self.gd_client.about().get().execute()

		md_quota     = long(db_data['quota_info']['quota'])  + long(gd_data['quotaBytesTotal'])
		md_quotaUsed = long(db_data['quota_info']['normal']) + long(gd_data['quotaBytesUsed'])
		md_quotaFree = md_quota - md_quotaUsed

		db_quota     = db_data['quota_info']['quota']
		db_quotaUsed = db_data['quota_info']['normal']
		db_quotaFree = db_quota - db_quotaUsed

		gd_quota     = long(gd_data['quotaBytesTotal'])
		gd_quotaUsed = long(gd_data['quotaBytesUsed'])
		gd_quotaFree = gd_quota - gd_quotaUsed


		md_about = {'md_quota'     : size(md_quota, system=si),
		            'md_quotaUsed' : size(md_quotaUsed, system=si),
		            'md_quotaFree' : size(md_quotaFree, system=si),
		            'db_quota'     : size(db_quota, system=si),
		            'db_quotaUsed' : size(db_quotaUsed, system=si),
		            'db_quotaFree' : size(db_quotaFree, system=si),
		            'gd_quota'     : size(gd_quota, system=si),
		            'gd_quotaUsed' : size(gd_quotaUsed, system=si),
		            'gd_quotaFree' : size(gd_quotaFree, system=si),}

		return md_about
Example #11
 def __init__(self, f):
     if not bool(f) or not default_storage.exists(f):
         self.file = ''
         self.last_modification = None
         self.size = size(0)
         self.url = ""
     else:
         self.file = basename(default_storage.path(f))
         self.last_modification = getmtime(default_storage.path(f))
         self.size = size(getsize(default_storage.path(f)))
         self.url = ""
Example #12
def get_size(start_path = '.'):
    total_size = 0
    for dirpath, dirnames, filenames in os.walk(start_path):
        for f in filenames:
            try:
                fp = os.path.join(dirpath, f)
                total_size += os.path.getsize(fp)
                print str(total_size)+" bytes / "+str(size(total_size))+" counted <------------ current position: "+start_path+" : "+f
                for location in locations_dict:
                    if locations_dict[location][1] != "":
                        print str(location)+": "+str(size(locations_dict[location][1]))
            except OSError, e:
                print e
Example #13
def test_docker_run_memory_swap(client, shipy):
    farg = '--memory-swap'
    fval = ('110M',)

    sarg = '--memory'
    sval = '100M'
    container = run_template(client, shipy, farg=farg, fval=fval,
                             sarg=sarg, sval=sval)

    assert sval == \
           size(client.inspect_container(container)['HostConfig']['Memory'])

    assert fval[0] == \
           size(client.inspect_container(container)['HostConfig']['MemorySwap'])
Example #14
    def _render(self, file_pointer, file_path):

        if self._check_size(file_pointer):
            max_kb = size(self.MAX_SIZE)
            file_kb = size(os.stat(file_pointer.name).st_size)
            return """
        There was an error rendering {}
        <div>This file is too big: Max size = {}; File size = {}</div>
        """.format(file_pointer.name, max_kb, file_kb)

        _, file_name = os.path.split(file_pointer.name)
        exporters = self.render_exporters(file_name)
        rendered = self.render(file_pointer, file_path)
        return exporters + '\n' + rendered
Example #15
 def _download_file(self, fileinfo):
     filename = fileinfo.get_name()
     if not self.hidden and filename.startswith('.'):
         if DEBUG:
             print('Skipping hidden file %s' % filename)
         return
     try:
         print('Downloading file %s' % str(filename))
     except UnicodeEncodeError:
         print('Whoa! A unicode encode error!')
         return
     self.newfiles += 1
     # pydevd.settrace()
     if self.newer:
         if os.path.isfile(filename):
             fsize = os.stat(filename).st_size
             # Skip when the local copy matches the remote in size and is at
             # least as new (the original compared the remote mtime with
             # itself, which is always true).
             if fsize == float(fileinfo.get_bytes()) and \
                os.stat(filename).st_mtime >= float(fileinfo.get_modified()):
                 print('Skipping unmodified file %s' % filename)
                 return
     fo = open(filename, 'wb')
     _status, stream = fileinfo.get_content()
     fsize = fileinfo.get_bytes()
     buf = stream.read(BLKSIZE)
     if not buf:
         fo.write(buf)
     else:
         wrote = 0
         while buf:
             fo.write(buf)
             #time.sleep(1)  # This seems to help prevent IncompleteRead exceptions
             if self.verbose:
                 wrote += len(buf)
                 if float(fsize) != 0:
                     pct = (wrote / float(fsize)) * 100
                     print('Size: %s %f%% done - Wrote %s bytes    ' % (size(fsize),
                                                                    pct,
                                                                    size(wrote)),
                        end='\r')
             try:
                 buf = stream.read(BLKSIZE)
             except:
                 print('')
                 raise
         if self.verbose:
             print('')
     fo.close()
     sys.stdout.flush()
Example #16
    def __init__(self, fullName, parent=None):
        self._fullName = os.path.abspath(fullName)
        self.attr = Metadata(self._fullName)
        self.attr.dateModified = time.strftime(self.__datetime_format, time.gmtime(os.path.getmtime(fullName)))

        self.data = os.path.basename(self._fullName)
        self._isDirectory = os.path.isdir(fullName)
        if self._isDirectory:
            self.state = "closed"
            self.attr.rel = "folder"
            self.attr.size = filesize.size(FileNode.getsize(self._fullName))
        else:
            self.attr.rel = "file"
            self.attr.size = filesize.size(os.path.getsize(self._fullName))
        self.children = []
Example #17
    def test_do_info_draw_general_docker_image(self, mock_table_draw, mock_table_add_row,
                                               mock_draw_source, mock_draw_generation):
        # given
        i = self.prepare_image()
        info_image = info_utils.create_image_format_docker()

        # when
        i.do_info_draw_general(info_image)

        # then
        calls = []
        calls.append(call(["Name", info_image.name]))
        calls.append(call(["Format", info_image.targetFormat.name]))
        calls.append(call(["Id", info_image.dbId]))
        calls.append(call(["Version", info_image.version]))
        calls.append(call(["Revision", info_image.revision]))
        calls.append(call(["Uri", info_image.uri]))
        calls.append(call(["Created", info_image.created.strftime("%Y-%m-%d %H:%M:%S")]))
        calls.append(call(["Size", size(info_image.fileSize)]))
        calls.append(call(["Compressed", "Yes" if info_image.compress else "No"]))
        calls.append(call(["RegisteringName", info_image.registeringName]))
        calls.append(call(["Entrypoint", info_image.entrypoint]))

        mock_table_draw.assert_called_once()
        mock_draw_source.assert_called_once()
        mock_draw_generation.assert_called_once()
        assert mock_table_add_row.call_count == 11
        mock_table_add_row.assert_has_calls(calls)
Example #18
def test_docker_run_shm_size(client, shipy):
    farg = '--shm-size'
    fval = ('100M',)
    container = run_template(client, shipy, farg=farg, fval=fval)

    assert fval[0] == \
           size(client.inspect_container(container)['HostConfig']['ShmSize'])
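
The comparison holds because Docker reports ShmSize back in bytes and size()
collapses those bytes to the same short form the test passed on the command
line:

from hurry.filesize import size

assert size(100 * 1024 * 1024) == '100M'  # the byte value Docker stores for --shm-size 100M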
Example #19

    def set_options(self, *args, **kwargs):
        self.url = 'http://campaignfinance.cdn.sos.ca.gov/dbwebexport.zip'
        self.verbosity = int(kwargs['verbosity'])

        if kwargs['test_data']:
            self.data_dir = get_test_download_directory()
            settings.CALACCESS_DOWNLOAD_DIR = self.data_dir
            if self.verbosity:
                self.log("Using test data")
        else:
            self.data_dir = get_download_directory()

        os.path.exists(self.data_dir) or os.makedirs(self.data_dir)
        self.zip_path = os.path.join(self.data_dir, 'calaccess.zip')
        self.tsv_dir = os.path.join(self.data_dir, "tsv/")
        self.csv_dir = os.path.join(self.data_dir, "csv/")
        os.path.exists(self.csv_dir) or os.makedirs(self.csv_dir)
        if kwargs['download']:
            self.download_metadata = self.get_download_metadata()
            self.local_metadata = self.get_local_metadata()
            prompt_context = dict(
                last_updated=self.download_metadata['last-modified'],
                time_ago=naturaltime(self.download_metadata['last-modified']),
                size=size(self.download_metadata['content-length']),
                last_download=self.local_metadata['last-download'],
                download_dir=self.data_dir,
            )
            self.prompt = render_to_string(
                'calaccess_raw/downloadcalaccessrawdata.txt',
                prompt_context,
            )
Example #20
def test_docker_run_memory(client, shipy):
    farg = '--memory'
    fval = ('100M',)
    container = run_template(client, shipy, farg=farg, fval=fval)

    assert fval[0] == \
           size(client.inspect_container(container)['HostConfig']['Memory'])
Example #21
def system_info():
    viewer_log_file = '/tmp/sync_viewer.log'
    if path.exists(viewer_log_file):
        viewlog = check_output(['tail', '-n', '20', viewer_log_file]).split('\n')
    else:
        viewlog = ["(no viewer log present -- is only the sync server running?)\n"]

    # Get load average from last 15 minutes and round to two digits.
    loadavg = round(getloadavg()[2], 2)

    try:
        run_tvservice = check_output(['tvservice', '-s'])
        display_info = re_split(r'\||,', run_tvservice.strip('state:'))
    except:
        display_info = False

    # Calculate disk space
    slash = statvfs("/")
    free_space = size(slash.f_bavail * slash.f_frsize)

    # Get uptime
    uptime_in_seconds = uptime()
    system_uptime = timedelta(seconds=uptime_in_seconds)

    return template('system_info', viewlog=viewlog, loadavg=loadavg, free_space=free_space, uptime=system_uptime, display_info=display_info)
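
The free-space line is the same statvfs idiom every variant below repeats:
blocks available to unprivileged processes, times the fragment size those
blocks are measured in, fed to size() for display. Stand-alone:

from os import statvfs
from hurry.filesize import size

st = statvfs('/')
# f_bavail counts blocks available to non-root users; f_frsize is the
# fragment size of those blocks.
print(size(st.f_bavail * st.f_frsize))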
Example #22
def upload_files_to_site(args):
    bold('Do you want to upload the files to w3af.org? [Y/n]', newline=False)
    upload = raw_input()
    upload = upload.strip()

    if upload.lower() in ('y', 'yes', ''):
        files = [
                 'w3af-%s.tar.bz2.md5sum' % args.release_version,
                 'w3af-%s.tar.bz2' % args.release_version,
                 'w3af-sphinx-%s.tar.bz2' % args.release_version,
                 ]

        for filename in files:

            fsize = size(os.path.getsize(filename))
            bold('Uploading %s with file size of %s' % (filename, fsize,))

            with settings(host_string='*****@*****.**'):
                success = put(filename, UPLOAD_PATH, use_sudo=True)

                if not success:
                    red('File upload failed!')
                    return False

    green('Uploaded files to w3af.org!')
    bold('Remember to add links to these files from wordpress.')

    return True
Example #23

    def set_options(self, *args, **kwargs):
        # Check for the user-defined data dir
        # otherwise put the data in the data dir under the project root

        data_dir = getattr(settings, 'CALACCESS_DOWNLOAD_DIR', os.path.join(settings.BASE_DIR, 'data'))


        self.url = 'http://campaignfinance.cdn.sos.ca.gov/dbwebexport.zip'
        self.data_dir = data_dir
        self.zip_path = os.path.join(self.data_dir, 'calaccess.zip')
        self.tsv_dir = os.path.join(self.data_dir, "tsv/")
        self.csv_dir = os.path.join(self.data_dir, "csv/")
        os.path.exists(self.csv_dir) or os.mkdir(self.csv_dir)
        self.metadata = self.get_metadata()
        self.prompt = PROMPT % (
            dateformat(self.metadata['last-modified'], 'N j, Y'),
            dateformat(self.metadata['last-modified'], 'P'),
            naturaltime(self.metadata['last-modified']),
            size(self.metadata['content-length']),
            self.data_dir,
        )
        self.pbar = progressbar.ProgressBar(
            widgets=[
                progressbar.Percentage(),
                progressbar.Bar(),
                ' ',
                progressbar.ETA(),
                ' ',
                progressbar.FileTransferSpeed()
            ],
            maxval=self.metadata['content-length']
        )
Example #24
def system_info():
    viewer_log_file = "/tmp/screenly_viewer.log"
    if path.exists(viewer_log_file):
        viewlog = check_output(["tail", "-n", "20", viewer_log_file]).split("\n")
    else:
        viewlog = ["(no viewer log present -- is only the screenly server running?)\n"]

    # Get load average from last 15 minutes and round to two digits.
    loadavg = round(getloadavg()[2], 2)

    try:
        run_tvservice = check_output(["tvservice", "-s"])
        display_info = re_split(r"\||,", run_tvservice.strip("state:"))
    except:
        display_info = False

    # Calculate disk space
    slash = statvfs("/")
    free_space = size(slash.f_bavail * slash.f_frsize)

    # Get uptime
    uptime_in_seconds = uptime()
    system_uptime = timedelta(seconds=uptime_in_seconds)

    return template(
        "system_info",
        viewlog=viewlog,
        loadavg=loadavg,
        free_space=free_space,
        uptime=system_uptime,
        display_info=display_info,
    )
Example #25
def system_info():
    viewlog = None
    try:
        viewlog = [line.decode('utf-8') for line in
                   check_output(['sudo', 'systemctl', 'status', 'screenly-viewer.service', '-n', '20']).split(b'\n')]
    except:
        pass

    loadavg = diagnostics.get_load_avg()['15 min']

    display_info = diagnostics.get_monitor_status()

    display_power = diagnostics.get_display_power()

    # Calculate disk space
    slash = statvfs("/")
    free_space = size(slash.f_bavail * slash.f_frsize)

    # Get uptime
    uptime_in_seconds = diagnostics.get_uptime()
    system_uptime = timedelta(seconds=uptime_in_seconds)

    # Player name for title
    player_name = settings['player_name']

    return template(
        'system_info.html',
        player_name=player_name,
        viewlog=viewlog,
        loadavg=loadavg,
        free_space=free_space,
        uptime=system_uptime,
        display_info=display_info,
        display_power=display_power
    )
Example #26
def system_info():
    viewer_log_file = '/tmp/screenly_viewer.log'
    if path.exists(viewer_log_file):
        viewlog = check_output(['tail', '-n', '20', viewer_log_file]).split('\n')
    else:
        viewlog = ["(no viewer log present -- is only the screenly server running?)\n"]

    loadavg = diagnostics.get_load_avg()['15 min']

    display_info = diagnostics.get_monitor_status()

    # Calculate disk space
    slash = statvfs("/")
    free_space = size(slash.f_bavail * slash.f_frsize)

    # Get uptime
    uptime_in_seconds = diagnostics.get_uptime()
    system_uptime = timedelta(seconds=uptime_in_seconds)

    return template(
        'system_info',
        viewlog=viewlog,
        loadavg=loadavg,
        free_space=free_space,
        uptime=system_uptime,
        display_info=display_info
    )
Example #27
def system_info():
    viewer_log_file = '/tmp/screenly_viewer.log'
    if path.exists(viewer_log_file):
        viewlog = check_output(['tail', '-n', '20', viewer_log_file]).split('\n')
    else:
        viewlog = ["(no viewer log present -- is only the screenly server running?)\n"]

    loadavg = getloadavg()[2]

    try:
        resolution = check_output(['tvservice', '-s']).strip()
    except:
        resolution = None

    # Calculate disk space
    slash = statvfs("/")
    free_space = size(slash.f_bsize * slash.f_bavail)

    # Get uptime
    try:
        with open('/proc/uptime', 'r') as f:
            uptime_seconds = float(f.readline().split()[0])
            uptime = str(timedelta(seconds=uptime_seconds))
    except:
        uptime = None

    return template('system_info', viewlog=viewlog, loadavg=loadavg, free_space=free_space, uptime=uptime, resolution=resolution)
Example #28
    def do_info_draw_general(self, info_image):
        table = Texttable(0)
        table.set_cols_dtype(["a", "t"])
        table.set_cols_align(["l", "l"])

        table.add_row(["Name", info_image.name])
        table.add_row(["Format", info_image.targetFormat.name])
        table.add_row(["Id", info_image.dbId])
        table.add_row(["Version", info_image.version])
        table.add_row(["Revision", info_image.revision])
        table.add_row(["Uri", info_image.uri])

        self.do_info_draw_source(info_image.parentUri, table)

        table.add_row(["Created", info_image.created.strftime("%Y-%m-%d %H:%M:%S")])
        table.add_row(["Size", size(info_image.fileSize)])
        table.add_row(["Compressed", "Yes" if info_image.compress else "No"])

        if self.is_docker_based(info_image.targetFormat.format.name):
            registring_name = None
            if info_image.status.complete:
                registring_name = info_image.registeringName
            table.add_row(["RegisteringName",registring_name])
            table.add_row(["Entrypoint", info_image.entrypoint.replace("\\", "")])

        self.do_info_draw_generation(info_image, table)

        print table.draw() + "\n"
Example #29
def system_info():
    viewlog = None
    try:
        viewlog = check_output(['sudo', 'systemctl', 'status', 'screenly-viewer.service', '-n', '20']).split('\n')
    except:
        pass

    loadavg = diagnostics.get_load_avg()['15 min']

    display_info = diagnostics.get_monitor_status()

    # Calculate disk space
    slash = statvfs("/")
    free_space = size(slash.f_bavail * slash.f_frsize)

    # Get uptime
    uptime_in_seconds = diagnostics.get_uptime()
    system_uptime = timedelta(seconds=uptime_in_seconds)

    return template(
        'system_info',
        viewlog=viewlog,
        loadavg=loadavg,
        free_space=free_space,
        uptime=system_uptime,
        display_info=display_info
    )
Example #30

 def set_options(self, *args, **kwargs):
     self.url = 'http://campaignfinance.cdn.sos.ca.gov/dbwebexport.zip'
     self.data_dir = get_download_directory()
     os.path.exists(self.data_dir) or os.mkdir(self.data_dir)
     self.zip_path = os.path.join(self.data_dir, 'calaccess.zip')
     self.tsv_dir = os.path.join(self.data_dir, "tsv/")
     self.csv_dir = os.path.join(self.data_dir, "csv/")
     os.path.exists(self.csv_dir) or os.mkdir(self.csv_dir)
     if kwargs['download']:
         self.download_metadata = self.get_download_metadata()
         self.local_metadata = self.get_local_metadata()
         prompt_context = dict(
             last_updated=self.download_metadata['last-modified'],
             time_ago=naturaltime(self.download_metadata['last-modified']),
             size=size(self.download_metadata['content-length']),
             last_download=self.local_metadata['last-download'],
             download_dir=self.data_dir,
         )
         self.prompt = render_to_string(
             'calaccess_raw/downloadcalaccessrawdata.txt',
             prompt_context,
         )
         self.pbar = progressbar.ProgressBar(
             widgets=[
                 progressbar.Percentage(),
                 progressbar.Bar(),
                 ' ',
                 progressbar.ETA(),
                 ' ',
                 progressbar.FileTransferSpeed()
             ],
             maxval=self.download_metadata['content-length']
         )
     self.verbosity = int(kwargs['verbosity'])
Example #31
def get_file_size(filename):
    return size(os.path.getsize(config.index_path + "/" + filename))
Example #32
def gen_stats():
    # DB initialization
    conn = sqlite3.connect('oper/dbfile.db')
    conn.isolation_level = None  # turn on autocommit to increase concurrency
    c = conn.cursor()

    result = {}
    c.execute(
        "select avg(strftime('%s',BatchStatusUpdateTime) -strftime('%s',AssignedTime) ) from main where BatchStatus=2"
    )
    result['average_batch_time_seconds'] = c.fetchone()[0]

    c.execute('SELECT count(*) FROM main WHERE BatchStatus=1')
    result['batches_assigned'] = c.fetchone()[0]
    c.execute('SELECT count(*) FROM main WHERE BatchStatus=2')
    result['batches_completed'] = c.fetchone()[0]
    c.execute(
        "SELECT count(*) FROM main WHERE BatchStatusUpdateTime> datetime('now', '-10 minute') and BatchStatus=2"
    )
    result['batches_completed_last_10_minutes'] = c.fetchone()[0]
    c.execute(
        "SELECT count(*) FROM main WHERE BatchStatusUpdateTime> datetime('now', '-1 hour') and BatchStatus=2"
    )
    result['batches_completed_last_hour'] = c.fetchone()[0]

    c.execute(
        "SELECT count(BatchContent) FROM main WHERE BatchStatusUpdateTime> datetime('now', '-10 minute') and BatchStatus=2"
    )
    result['exclusions_completed_last_10_minutes'] = c.fetchone()[0]
    c.execute(
        "SELECT count(BatchContent) FROM main WHERE BatchStatusUpdateTime> datetime('now', '-1 hour') and BatchStatus=2"
    )
    result['exclusions_completed_last_hour'] = c.fetchone()[0]
    c.execute(
        'SELECT count(*) FROM main WHERE (BatchStatus=0 OR BatchStatus=1)')
    result['batches_remaining'] = c.fetchone()[0]
    c.execute('SELECT count(*) FROM main')
    result['batches_total'] = c.fetchone()[0]
    c.execute('SELECT sum(BatchSize) FROM main')
    try:
        result['total_data_size'] = c.fetchone()[0]
    except:
        result['total_data_size'] = 0
    if result['total_data_size'] is None:  # sum() returns NULL on an empty table
        result['total_data_size'] = 0
    result['total_data_size_pretty'] = size(result['total_data_size'],
                                            system=alternative)
    c.execute('SELECT count(BatchContent) FROM main')

    try:
        result['batches_completed_percent'] = (
            result['batches_completed'] / (result['batches_total'])
        ) * 100  #-(0.9*result['total_exclusions'])))*100
    except ZeroDivisionError:
        result['batches_completed_percent'] = None

    try:
        result['projected_hours_remaining_10_min_base'] = (
            result['batches_remaining']) / (
                result['batches_completed_last_10_minutes'] * 6)
    except ZeroDivisionError:
        result['projected_hours_remaining_10_min_base'] = None
    try:
        result['projected_hours_remaining_1_hour_base'] = (
            result['batches_remaining']) / (
                result['batches_completed_last_hour'])
        #result['projected_hours_remaining'] = result['projected_hours_remaining_1_hour_base'] #(result['average_batch_time_seconds'] * (result['batches_remaining']-(0.9*result['total_exclusions'])))/3600
    except ZeroDivisionError:
        result['projected_hours_remaining_1_hour_base'] = None
        #result['projected_hours_remaining'] = None

    c.execute('SELECT COUNT(*) FROM workers')
    result['worker_count'] = c.fetchone()[0]
    c.execute(
        "SELECT COUNT(*) FROM workers where LastAliveTime> datetime('now', '-10 minute')"
    )
    result['worker_count_last_10_minutes'] = c.fetchone()[0]
    c.execute(
        "SELECT COUNT(*) FROM workers where LastAliveTime> datetime('now', '-1 hour')"
    )
    result['worker_count_last_hour'] = c.fetchone()[0]

    c.execute(
        "SELECT COUNT(*) FROM workers where LastAliveTime> datetime('now', '-1 hour') AND WorkerVersion=4"
    )
    result['version_4_workers_last_hour'] = c.fetchone()[0]
    try:
        result['percent_version_4_workers_last_hour'] = (
            result['version_4_workers_last_hour'] /
            result['worker_count_last_hour']) * 100
    except ZeroDivisionError:
        result['percent_version_4_workers_last_hour'] = None

    c.execute('SELECT COUNT(DISTINCT LastAliveIP) FROM workers')
    result['worker_ip_count'] = c.fetchone()[0]
    c.execute(
        "SELECT COUNT(DISTINCT LastAliveIP) FROM workers where LastAliveTime> datetime('now', '-10 minute')"
    )
    result['worker_ip_count_last_10_minutes'] = c.fetchone()[0]
    c.execute(
        "SELECT COUNT(DISTINCT LastAliveIP) FROM workers where LastAliveTime> datetime('now', '-1 hour')"
    )
    result['worker_ip_count_last_hour'] = c.fetchone()[0]
    return json.dumps(result)
Example #33
 def generateLogs(self, title, numImages, postLink, postSize):
     LoggingErrors("activity.log", title + "|" + postLink + "|" + str(numImages) + "|" + str(size(postSize)))
     return
Example #34
 def sys_total_memory() -> str:
     return size(StatHelpers._raw_sys_total_memory())
Example #35
'''

from pprint import pprint
import os
from hurry.filesize import size
import sqlite3
import csv

# Change the path for sqlite_file and dirpath according to your data
sqlite_file = 'openstreetmap_sf_db.sqlite'
dirpath = '/Users/nazaninmirarab/Desktop/Data Science/P3/Project/Sizes'

files_list = []
for path, dirs, files in os.walk(dirpath):
    files_list.extend([(filename, size(os.path.getsize(os.path.join(path, filename)))) for filename in files])

for filename, fsize in files_list:  # avoid shadowing hurry.filesize's size()
    print '{:.<40s}: {:5s}'.format(filename, fsize)


con = sqlite3.connect(sqlite_file)
cur = con.cursor()

def number_of_nodes():
    output = cur.execute('SELECT COUNT(*) FROM nodes')
    return output.fetchone()[0]

print 'Number of nodes: \n' , number_of_nodes()

def number_of_ways():
Example #36
 def get_human_readable(value, system):
     from hurry.filesize import size
     return size(value, system=system)
Example #37
	def NormalLoad(self):
		self.NormalList.setEnabled(True)
		self.NormalRadio.setEnabled(True)
		for i in self.video.streams:
			self.NormalList.addItem(i.resolution+' ('+size(i.get_filesize())+') '+i.extension)
Example #38
def get_total_size(vault, bank):
    (vault, status, status_text, date, last,
     rule) = get_vault_summary(bank, vault)
    return size(_get_os("du -s %s/%s" % (bank, vault)))
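
_get_os is a project helper not shown here; for size() to work it has to
return an integer byte count rather than du's raw "SIZE<TAB>PATH" line. A
defensive sketch, assuming GNU du (the -b flag is GNU-specific) and a plain
subprocess-based helper in its place:

import subprocess
from hurry.filesize import size

def du_bytes(path):
    # GNU 'du -sb' prints the total apparent size in bytes as "SIZE\tPATH".
    out = subprocess.check_output(['du', '-sb', path])
    return int(out.split()[0])

print(size(du_bytes('/var/log')))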
Example #39
async def upload_to_gdrive(file_upload, message, messa_ge, g_id):
    await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
    del_it = await message.edit_text(
        f"<a href='tg://user?id={g_id}'>🔊</a> Now Uploading to ☁️ Cloud!!!")
    if not os.path.exists("rclone.conf"):
        with open("rclone.conf", "w+", newline="\n", encoding="utf-8") as fole:
            fole.write(f"{RCLONE_CONFIG}")
    if os.path.exists("rclone.conf"):
        with open("rclone.conf", "r+") as file:
            con = file.read()
            gUP = re.findall(r"\[(.*)\]", con)[0]
            LOGGER.info(gUP)
    destination = f"{DESTINATION_FOLDER}"
    file_upload = str(Path(file_upload).resolve())
    LOGGER.info(file_upload)
    if os.path.isfile(file_upload):
        g_au = [
            "rclone",
            "copy",
            "--config=rclone.conf",
            f"{file_upload}",
            f"{gUP}:{destination}",
            "-v",
        ]
        LOGGER.info(g_au)
        tmp = await asyncio.create_subprocess_exec(
            *g_au,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        pro, cess = await tmp.communicate()
        LOGGER.info(pro.decode("utf-8"))
        LOGGER.info(cess.decode("utf-8"))
        gk_file = re.escape(os.path.basename(file_upload))
        LOGGER.info(gk_file)
        with open("filter.txt", "w+", encoding="utf-8") as filter:
            print(f"+ {gk_file}\n- *", file=filter)

        t_a_m = [
            "rclone",
            "lsf",
            "--config=rclone.conf",
            "-F",
            "i",
            "--filter-from=filter.txt",
            "--files-only",
            f"{gUP}:{destination}",
        ]
        gau_tam = await asyncio.create_subprocess_exec(
            *t_a_m,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        # os.remove("filter.txt")
        gau, tam = await gau_tam.communicate()
        gautam = gau.decode().strip()
        LOGGER.info(gau.decode())
        LOGGER.info(tam.decode())
        # os.remove("filter.txt")
        gauti = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
        gjay = size(os.path.getsize(file_upload))
        button = []
        button.append([
            pyrogram.InlineKeyboardButton(text="☁️ CloudUrl ☁️",
                                          url=f"{gauti}")
        ])
        if INDEX_LINK:
            indexurl = f"{INDEX_LINK}/{os.path.basename(file_upload)}"
            tam_link = requests.utils.requote_uri(indexurl)
            LOGGER.info(tam_link)
            button.append([
                pyrogram.InlineKeyboardButton(text="ℹ️ IndexUrl ℹ️",
                                              url=f"{tam_link}")
            ])
        button_markup = pyrogram.InlineKeyboardMarkup(button)

        await messa_ge.reply_text(
            f"🤖: Uploaded successfully `{os.path.basename(file_upload)}` <a href='tg://user?id={g_id}'>🤒</a>\n📀 Size: {gjay}",
            reply_markup=button_markup,
        )
        os.remove(file_upload)
        await del_it.delete()
    else:
        tt = os.path.join(destination, os.path.basename(file_upload))
        LOGGER.info(tt)
        t_am = [
            "rclone",
            "copy",
            "--config=rclone.conf",
            f"{file_upload}",
            f"{gUP}:{tt}",
            "-v",
        ]
        LOGGER.info(t_am)
        tmp = await asyncio.create_subprocess_exec(
            *t_am,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        pro, cess = await tmp.communicate()
        LOGGER.info(pro.decode("utf-8"))
        LOGGER.info(cess.decode("utf-8"))
        g_file = re.escape(os.path.basename(file_upload))
        LOGGER.info(g_file)
        with open("filter1.txt", "w+", encoding="utf-8") as filter1:
            print(f"+ {g_file}/\n- *", file=filter1)

        g_a_u = [
            "rclone",
            "lsf",
            "--config=rclone.conf",
            "-F",
            "i",
            "--filter-from=filter1.txt",
            "--dirs-only",
            f"{gUP}:{destination}",
        ]
        gau_tam = await asyncio.create_subprocess_exec(
            *g_a_u,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        # os.remove("filter1.txt")
        gau, tam = await gau_tam.communicate()
        gautam = gau.decode("utf-8")
        LOGGER.info(gautam)
        LOGGER.info(tam.decode("utf-8"))
        # os.remove("filter1.txt")
        gautii = f"https://drive.google.com/folderview?id={gautam}"
        gjay = size(getFolderSize(file_upload))
        LOGGER.info(gjay)
        button = []
        button.append([
            pyrogram.InlineKeyboardButton(text="☁️ CloudUrl ☁️",
                                          url=f"{gautii}")
        ])
        if INDEX_LINK:
            indexurl = f"{INDEX_LINK}/{os.path.basename(file_upload)}/"
            tam_link = requests.utils.requote_uri(indexurl)
            LOGGER.info(tam_link)
            button.append([
                pyrogram.InlineKeyboardButton(text="ℹ️ IndexUrl ℹ️",
                                              url=f"{tam_link}")
            ])
        button_markup = pyrogram.InlineKeyboardMarkup(button)

        await messa_ge.reply_text(
            f"🤖: Uploaded successfully `{os.path.basename(file_upload)}` <a href='tg://user?id={g_id}'>🤒</a>\n📀 Size: {gjay}",
            reply_markup=button_markup,
        )
        shutil.rmtree(file_upload)
        await del_it.delete()
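
The filter.txt dance above is how the snippet recovers the Drive ID of the
file it just uploaded: include exactly one name, exclude everything else, and
ask rclone lsf to print only the ID field (-F i). Condensed, assuming the same
remote layout as the snippet and a filename free of rclone filter
metacharacters:

import subprocess

def drive_id(remote, folder, filename):
    with open('filter.txt', 'w', encoding='utf-8') as f:
        f.write('+ {}\n- *'.format(filename))  # keep one file, drop the rest
    out = subprocess.check_output([
        'rclone', 'lsf', '--config=rclone.conf', '-F', 'i',
        '--filter-from=filter.txt', '--files-only',
        '{}:{}'.format(remote, folder),
    ])
    return out.decode().strip()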
Example #40
async def upload_to_gdrive(file_upload, message, messa_ge, g_id):
    await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
    del_it = await message.edit_text(
        f"<a href='tg://user?id={g_id}'>🔊</a> Now Uploading to ☁️ Cloud!!!")
    #subprocess.Popen(('touch', 'rclone.conf'), stdout = subprocess.PIPE)
    with open('rclone.conf', 'a', newline="\n", encoding='utf-8') as fole:
        fole.write("[DRIVE]\n")
        fole.write(f"{RCLONE_CONFIG}")
    destination = f'{DESTINATION_FOLDER}'
    if os.path.isfile(file_upload):
        g_au = [
            'rclone', 'copy', '--config=/app/rclone.conf',
            f'/app/{file_upload}', 'DRIVE:'
            f'{destination}', '-v'
        ]
        tmp = await asyncio.create_subprocess_exec(
            *g_au,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        pro, cess = await tmp.communicate()
        LOGGER.info(pro.decode('utf-8'))
        LOGGER.info(cess.decode('utf-8'))
        gk_file = re.escape(file_upload)
        LOGGER.info(gk_file)
        with open('filter.txt', 'w+', encoding='utf-8') as filter:
            print(f"+ {gk_file}\n- *", file=filter)

        t_a_m = [
            'rclone', 'lsf', '--config=/app/rclone.conf', '-F', 'i',
            "--filter-from=/app/filter.txt", "--files-only", 'DRIVE:'
            f'{destination}'
        ]
        gau_tam = await asyncio.create_subprocess_exec(
            *t_a_m,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        #os.remove("filter.txt")
        gau, tam = await gau_tam.communicate()
        LOGGER.info(gau)
        gautam = gau.decode("utf-8")
        LOGGER.info(gautam)
        LOGGER.info(tam.decode('utf-8'))
        #os.remove("filter.txt")
        gauti = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
        gau_link = re.search(r"(?P<url>https?://[^\s]+)", gauti).group("url")
        LOGGER.info(gau_link)
        #indexurl = f"{INDEX_LINK}/{file_upload}"
        #tam_link = requests.utils.requote_uri(indexurl)
        gjay = size(os.path.getsize(file_upload))
        LOGGER.info(gjay)
        button = []
        button.append([
            pyrogram.InlineKeyboardButton(text="☁️ CloudUrl ☁️",
                                          url=f"{gau_link}")
        ])
        if INDEX_LINK:
            indexurl = f"{INDEX_LINK}/{file_upload}"
            tam_link = requests.utils.requote_uri(indexurl)
            LOGGER.info(tam_link)
            button.append([
                pyrogram.InlineKeyboardButton(text="ℹ️ IndexUrl ℹ️",
                                              url=f"{tam_link}")
            ])
        button_markup = pyrogram.InlineKeyboardMarkup(button)
        await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
        await messa_ge.reply_text(
            f"🤖: {file_upload} has been Uploaded successfully to your Cloud <a href='tg://user?id={g_id}'>🤒</a>\n📀 Size: {gjay}",
            reply_markup=button_markup)
        #await message.edit_text(f"""🤖: {file_upload} has been Uploaded successfully to your cloud 🤒\n\n☁️ Cloud URL:  <a href="{gau_link}">FileLink</a>\nℹ️ Direct URL:  <a href="{tam_link}">IndexLink</a>""")
        os.remove(file_upload)
        await del_it.delete()
    else:
        tt = os.path.join(destination, file_upload)
        LOGGER.info(tt)
        t_am = [
            'rclone', 'copy', '--config=/app/rclone.conf',
            f'/app/{file_upload}', 'DRIVE:'
            f'{tt}', '-v'
        ]
        tmp = await asyncio.create_subprocess_exec(
            *t_am,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        pro, cess = await tmp.communicate()
        LOGGER.info(pro.decode('utf-8'))
        LOGGER.info(cess.decode('utf-8'))
        g_file = re.escape(file_upload)
        LOGGER.info(g_file)
        with open('filter1.txt', 'w+', encoding='utf-8') as filter1:
            print(f"+ {g_file}/\n- *", file=filter1)

        g_a_u = [
            'rclone', 'lsf', '--config=/app/rclone.conf', '-F', 'i',
            "--filter-from=/app/filter1.txt", "--dirs-only", 'DRIVE:'
            f'{destination}'
        ]
        gau_tam = await asyncio.create_subprocess_exec(
            *g_a_u,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        #os.remove("filter1.txt")
        gau, tam = await gau_tam.communicate()
        LOGGER.info(gau)
        gautam = gau.decode("utf-8")
        LOGGER.info(gautam)
        LOGGER.info(tam.decode('utf-8'))
        #os.remove("filter1.txt")
        gautii = f"https://drive.google.com/folderview?id={gautam}"
        gau_link = re.search(r"(?P<url>https?://[^\s]+)", gautii).group("url")
        LOGGER.info(gau_link)
        #indexurl = f"{INDEX_LINK}/{file_upload}/"
        #tam_link = requests.utils.requote_uri(indexurl)
        #print(tam_link)
        gjay = size(getFolderSize(file_upload))
        LOGGER.info(gjay)
        button = []
        button.append([
            pyrogram.InlineKeyboardButton(text="☁️ CloudUrl ☁️",
                                          url=f"{gau_link}")
        ])
        if INDEX_LINK:
            indexurl = f"{INDEX_LINK}/{file_upload}/"
            tam_link = requests.utils.requote_uri(indexurl)
            LOGGER.info(tam_link)
            button.append([
                pyrogram.InlineKeyboardButton(text="ℹ️ IndexUrl ℹ️",
                                              url=f"{tam_link}")
            ])
        button_markup = pyrogram.InlineKeyboardMarkup(button)
        await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
        await messa_ge.reply_text(
            f"🤖: Folder has been Uploaded successfully to {tt} in your Cloud <a href='tg://user?id={g_id}'>🤒</a>\n📀 Size: {gjay}",
            reply_markup=button_markup)
        #await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
        #await messa_ge.reply_text(f"""🤖: Folder has been Uploaded successfully to {tt} in your cloud 🤒\n\n☁️ Cloud URL:  <a href="{gau_link}">FolderLink</a>\nℹ️ Index Url:. <a href="{tam_link}">IndexLink</a>""")
        shutil.rmtree(file_upload)
        await del_it.delete()
Example #41
def model_size(variables):
  """Get model size."""
  total_params = sum(
      [np.prod(var.shape.as_list()) * var.dtype.size for var in variables])
  return hfsize.size(total_params, system=hfsize.alternative)
Example #42
 def bot_memory_usage() -> str:
     return "{} ({:.3f}%)".format(size(StatHelpers._raw_bot_memory_usage()), StatHelpers._raw_bot_per_used_memory())
Example #43
	def VideoLoad(self):
		self.VideoList.setEnabled(True)
		for i in self.video.videostreams:
			self.VideoList.addItem(i.resolution+' ('+size(i.get_filesize())+') '+i.extension)
Example #44
def home():
    values = psutil.virtual_memory()
    return size(values.available)
Example #45
 def sys_memory_usage() -> str:
     return size(StatHelpers._raw_sys_memory_usage())
Example #46
def get_disk():
    total, used, free, percent = psutil.disk_usage('/')
    total_space['text'] = size(total)
    used_space['text'] = size(used)
    free_space['text'] = size(free)
    percentage_space['text'] = str(percent) + '%'
Example #47
    folder = str(time.time()) + "/"
else:
    folder = folder + "/"

os.makedirs(str(folder), exist_ok=True)
print('Files will be saved in', folder)

threadSource = requests.get(url)
thread = threadSource.text
soup = BeautifulSoup(thread, 'html.parser')

for link in soup.findAll('a', {'class': 'fileThumb'}):
    imageUrl = 'https:' + link.get('href')
    imageName = imageUrl[-5:]
    print(imageUrl)
    name = time.time()
    fullName = str(name) + imageName
    pathName = str(folder) + str(fullName)
    urllib.request.urlretrieve(imageUrl, pathName)

totalSize = 0
for path, dirs, files in os.walk(folder):
    for f in files:
        fp = os.path.join(path, f)
        totalSize += os.path.getsize(fp)
print(Back.CYAN + size(totalSize, system=alternative),
      'downloaded, files have been saved in', folder)
print(Style.RESET_ALL)
print(Fore.WHITE + Back.GREEN + 'Success !')
print(Style.RESET_ALL)
Example #48
def main():
    args = sys.argv[1:]
    if len(args) < 2:
        return usage()

    src = args[0]
    num = int(args[1])
    jobs_created = set(range(1, num + 1))

    log = src
    logdir = src.split('/')[-1]

    if logdir in [
            'rootfile_data3650', 'rootfile_inclusiveMC', 'event',
            'Data12_event', 'rootfile', 'Data12', 'data', 'mc_psip12',
            'con3650', 'data09', 'mc_psip09'
    ]:
        logfiletype = 'BossLogFile'
    elif logdir == 'events':
        logfiletype = 'EventsLogFile'
    else:
        raise NameError(logdir)

    log = log.replace(logdir, 'log/%s' % logdir)

    sys.stdout.write('Scanning %s...\n' % src)

    file_list = []
    total_size = 0

    for root, dirs, files in os.walk(src):
        for f in files:
            file_list.append(int(f.split('-')[-1].split('.')[0]))
            total_size = total_size + os.path.getsize(os.path.join(root, f))

    sys.stdout.write('Checking log files...\n')
    jobs_not_terminated = []
    num_logfiles = []
    for root, dirs, files in os.walk(log):
        num_logfiles = files
        if len(files) == 0:
            sys.stdout.write('No log files found!\n')
            break
        for f in files:
            if logfiletype == 'BossLogFile':
                l = BossLogFile(os.path.join(root, f))
            elif logfiletype == 'EventsLogFile':
                l = EventsLogFile(os.path.join(root, f))
            else:
                raise NameError(logfiletype)

            if not l.terminated:
                sys.stdout.write('%s ... Not OK.\n' % f)
                job = f.split('-')[-1]
                jobs_not_terminated.append(job)
            else:
                sys.stdout.write('%s ... OK.\n' % f)

    sys.stdout.write('\nFound %s files, with total size %s.\n' %
                     (len(file_list), size(total_size)))

    if len(jobs_not_terminated) > 0:
        sys.stdout.write(
            'Non-terminated jobs are (%s): %s\n' %
            (len(jobs_not_terminated), ','.join(jobs_not_terminated)))
    elif len(num_logfiles) > 0:
        sys.stdout.write('All finished jobs terminated correctly.\n')

    if len(file_list) < num:
        jobs_missing = jobs_created.difference(file_list)
        jobs_missing = [str(li) for li in jobs_missing]
        sys.stdout.write('Missing jobs are (%s): %s\n' %
                         (len(jobs_missing), ','.join(jobs_missing)))
    else:
        sys.stdout.write('No missing jobs.\n')
Example no. 49
	def AudioLoad(self):
		self.AudioList.setEnabled(True)
		for i in self.video.audiostreams:
			self.AudioList.addItem(i.bitrate+' ('+size(i.get_filesize())+') '+i.extension)
Example no. 50
    def main(self):
        # remove trailing slash
        self._artifactory_server = self._artifactory_server.rstrip("/")
        if self._remove_empty_folder:
            rules = [
                CleanupPolicy(
                    "Cleaning up empty folders in local repositories",
                    delete_empty_folder(),
                )
            ]
        else:
            try:
                self._config = self._config.replace(".py", "")
                sys.path.append(".")
                rules = getattr(importlib.import_module(self._config), "RULES")
            except ImportError as error:
                print("Error: {}".format(error))
                exit(1)

        self._destroy_or_verbose()

        artifactory_session = requests.Session()
        artifactory_session.auth = HTTPBasicAuth(self._user, self._password)

        # Validate that every rule is a CleanupPolicy
        for cleanup_rule in rules:
            if not isinstance(cleanup_rule, CleanupPolicy):
                sys.exit(
                    "Rule '{}' is not a CleanupPolicy, please check it".format(
                        cleanup_rule))

        if self._policy_name:
            rules = [rule for rule in rules if self._policy_name in rule.name]
            if not rules:
                sys.exit("Rule with name '{}' does not found".format(
                    self._policy_name))

        table = PrettyTable()
        table.field_names = ["Cleanup Policy", "Files count", "Size"]
        table.align["Cleanup Policy"] = "l"
        total_size = 0

        for cleanup_rule in rules:  # type: CleanupPolicy
            with TC.block(cleanup_rule.name):
                cleanup_rule.init(artifactory_session,
                                  self._artifactory_server)

                # prepare
                with TC.block("AQL filter"):
                    cleanup_rule.aql_filter()

                # Get artifacts
                with TC.block("Get artifacts"):
                    print("*" * 80)
                    print("AQL Query:")
                    print(cleanup_rule.aql_text)
                    print("*" * 80)
                    artifacts = cleanup_rule.get_artifacts()
                print("Found {} artifacts".format(len(artifacts)))

                # Filter
                with TC.block("Filter results"):
                    artifacts_to_remove = cleanup_rule.filter(artifacts)
                print("Found {} artifacts AFTER filtering".format(
                    len(artifacts_to_remove)))

                # Delete or debug
                for artifact in artifacts_to_remove:
                    # test name for teamcity
                    repo_underscore = (artifact["repo"].replace(".",
                                                                "_").replace(
                                                                    "/", "_"))
                    path_underscore = (artifact["path"].replace(".",
                                                                "_").replace(
                                                                    "/", "_"))
                    name_underscore = (artifact["name"].replace(".",
                                                                "_").replace(
                                                                    "/", "_"))
                    test_name = "cleanup.{}.{}_{}".format(
                        repo_underscore, path_underscore, name_underscore)

                    # Use a TeamCity test block to report each removed artifact; locally, suppress output
                    ctx_mgr = (TC.test(test_name)
                               if is_running_under_teamcity() else
                               contextlib.suppress())
                    with ctx_mgr:
                        cleanup_rule.delete(artifact, destroy=self._destroy)

            # Info
            count_artifacts = len(artifacts_to_remove)
            print("Deleted artifacts count: {}".format(count_artifacts))
            try:
                artifacts_size = sum([x["size"] for x in artifacts_to_remove])
                total_size += artifacts_size
                artifacts_size = size(artifacts_size)
                print("Summary size: {}".format(artifacts_size))

                table.add_row(
                    [cleanup_rule.name, count_artifacts, artifacts_size])
            except KeyError:
                print("Summary size not defined")
            print()

        table.add_row(["", "", ""])
        table.add_row(["Total size: {}".format(size(total_size)), "", ""])
        print(table)
Example no. 51
    def download_link(self, **kwargs):
        # global form,main_url
        if kwargs:
            CurrentDir = os.path.dirname(os.path.realpath(__file__)).replace(
                '\\', '/')
            if 'link' in kwargs:
                url0 = kwargs['link']
                if type(url0) is list:
                    url = url0[0]
                else:
                    if url0[:4] == 'www.':
                        url = 'http://' + url0[4:]
                    else:
                        url = url0
            else:
                url = ''
            if 'html' in kwargs: html = kwargs['html']
            else: html = ''
            if 'proxy' in kwargs: pr = kwargs['proxy']
            else: pr = ''
            if 'user_pass' in kwargs: us_pss = kwargs['user_pass']
            else: us_pss = ''
            if 'pdfdir' in kwargs: pdf_download_location = kwargs['pdfdir']
            else: pdf_download_location = CurrentDir + '/PDF_Files'
            if 'water_pdfdir' in kwargs: wat_locatton = kwargs['water_pdfdir']
            else: wat_locatton = CurrentDir + '/Watermarked_PDF_Files'
            if 'root' in kwargs: root = kwargs['root']
            else: root = CurrentDir
            if 'need_watermarker' in kwargs:
                need_watermarker = kwargs['need_watermarker']
            else:
                need_watermarker = True
            if 'server' in kwargs: server_cdn = kwargs['server']
            else: server_cdn = ''
            if 'cookies' in kwargs: cookies = kwargs['cookies']
            else: cookies = ''
            if 'ftp_upload' in kwargs: ftp_upload = kwargs['ftp_upload']
            else: ftp_upload = ''
            if 'log_out' in kwargs:
                log_out = kwargs['log_out']
                if log_out != '':
                    link = {'link': url, 'log_out': log_out}
                else:
                    link = url
                    log_out = ''
            else:
                link = url
                log_out = ''
        try:

            url_watermark = kwargs['url_watermark']
        except:
            # url_watermark='Please insert Your url to add as watermark'
            url_watermark = 'www.free-peprs.elasa.ir'

        done = 0
        try:
            main_url = kwargs['main_url']

        except:
            main_url = url

        # link,proxy,user_pass = Find_Link(url).find_link(pr,us_pss)
        # link,proxy,user_pass=self.valid_link()
        # link=url

        if link != [] and link != None:
            if main_url != url:
                data_base_host = str(urlparse2(main_url).hostname)
                try:
                    ez_host = str(urlparse2(url).hostname)
                except:
                    ez_host = str(urlparse2(url[0]).hostname)
                try:
                    base_url = 'http://' + data_base_host
                    file_name_link = base_url + url.split(ez_host)[1]
                except:
                    base_url = 'http://' + data_base_host
                    try:
                        file_name_link = base_url + url[0].split(ez_host)[1]
                    except:
                        file_name_link = url
            else:
                file_name_link = url

            os.chdir(CurrentDir)
            # file_name = self.Find_Link(file_name_link).find_name(pdf_download_location,wat_locatton)
            # file_name.url_watermark=url_watermark

            # [html,proxy,user_pass,cookies]=self.Find_Link(link,pdfdir=pdf_download_location,water_pdfdir=wat_locatton,cookies=cookies).dowload_basePr_userpass(pr,us_pss,cookies)
            if (not (html.endswith('.pdf'))) and (html[:4] != '%PDF' or len(
                    re.findall('%%EOF', html)) == 0):
                [html, proxy, user_pass, cookies] = self.Find_Link(
                    main_url,
                    pdfdir=pdf_download_location,
                    water_pdfdir=wat_locatton,
                    cookies=cookies).dowload_basePr_userpass(pr,
                                                             us_pss,
                                                             cookies,
                                                             url=link)
            else:
                proxy = pr
                user_pass = us_pss
            try:
                if os.path.isfile(html):
                    file_is = 1
                else:
                    file_is = 0
            except:
                file_is = 0
            if (html != [] and html[:4] == '%PDF') or file_is == 1:
                PDF_File = import_mod(from_module='save_source',
                                      from_module2='PDF_File')
                if not (html.endswith('.pdf')):

                    # from save_source import PDF_File
                    file_name = self.Find_Link(file_name_link).find_name(
                        pdf_download_location, wat_locatton)
                    # file_name['url_watermark']=url_watermark
                    file_name.url_watermark = url_watermark
                else:
                    os.remove(cookies)
                    file_name = self.Find_Link(file_name_link).find_name(
                        pdf_download_location, wat_locatton)
                    file_name.filename = html.split('/')[-1]
                    # file_name.pdf_Folder_filename=file_name.pdf_Folder_filename.split('/')[-1]
                    file_name.url_watermark = url_watermark
                # file_name = PDF_File(link,pdf_download_location,wat_locatton).filename(link)
                # file_name = self.Find_Link(link).find_name(pdf_download_location,wat_locatton)

                if not need_watermarker == False:  # watermarking requested
                    os.chdir(CurrentDir)
                    if not os.path.isdir(pdf_download_location):
                        os.mkdir(pdf_download_location)
                    if not os.path.isdir(wat_locatton):
                        os.mkdir(wat_locatton)
                    pdf_dw_dir, pdf_dw_Wr_dir = PDF_File(
                        url, pdf_download_location,
                        wat_locatton).finall_file_saving(html,
                                                         file_name,
                                                         pdf_download_location,
                                                         no_watermarker=0)
                    # photo=PDF_File(url,pdf_download_location,wat_locatton).pdf_to_image(pdf=pdf_dw_dir,pages=0)
                    pdf_size = size(os.path.getsize(pdf_dw_dir))

                    pdf_dw_li = self.path2url(file_name.pdf_Folder_filename,
                                              server_cdn,
                                              pdf_download_location, root)
                    if file_is == 1 and html.endswith('.pdf'):
                        wt_pdf_size = size(os.path.getsize(pdf_dw_Wr_dir))
                        pdf_dw_Wr_li = self.path2url(
                            file_name.W_pdf_Folder_filename, server_cdn,
                            wat_locatton, root)
                    elif file_is == 1 and not html.endswith('.pdf'):
                        wt_pdf_size = pdf_size
                        pdf_dw_Wr_li = pdf_dw_li

                    else:
                        wt_pdf_size = size(os.path.getsize(pdf_dw_Wr_dir))
                        pdf_dw_Wr_li = self.path2url(
                            file_name.W_pdf_Folder_filename, server_cdn,
                            wat_locatton, root)

                    try:
                        os.remove(cookies)
                    except:
                        pass
                    print "fetching main paper link url ...\n\t%s" % pdf_dw_li[:]
                    print "fetching waterarker paper link url ...\n\t%s" % pdf_dw_Wr_li
                else:
                    if not os.path.isdir(pdf_download_location):
                        os.mkdir(pdf_download_location)

                    pdf_dw_dir, pdf_dw_Wr_dir = PDF_File(
                        url, pdf_download_location,
                        wat_locatton).finall_file_saving(html,
                                                         file_name,
                                                         pdf_download_location,
                                                         no_watermarker=1)
                    pdf_size = size(os.path.getsize(pdf_dw_dir))
                    # pdf_size=len(html)/1024 #in kbit
                    wt_pdf_size = ''
                    pdf_dw_li = self.path2url(file_name.pdf_Folder_filename,
                                              server_cdn,
                                              pdf_download_location, root)
                    print "fetching main paper link url ...\n\t%s" % pdf_dw_li[:]
                    pdf_dw_Wr_li = "No watter marker requested my be becuase of big size or lack of time"
                    print "fetching waterarker paper link url ...\n\t%s" % pdf_dw_Wr_li
                done = 1
                if ftp_upload == '1':
                    public_url = 'None'

                    if need_watermarker == True:  # watermarking requested
                        public_url = self.upload_file(
                            water_pdfdir=pdf_dw_Wr_dir, METODE='FTP')
                    else:
                        public_url = self.upload_file(water_pdfdir=pdf_dw_li,
                                                      METODE='FTP')
                else:
                    public_url = 'None'
                if public_url != 'None':
                    # try:
                    #     file=open(pdf_dw_dir);
                    #     file.close()
                    #     file=open(pdf_dw_Wr_dir);
                    #     file.close()
                    # except:
                    #     print 'pdfs are closed and reasy to removed from loal host!'
                    # os.close(pdf_dw_dir);os.close(pdf_dw_Wr_dir);
                    os.remove(pdf_dw_dir)
                    os.remove(pdf_dw_Wr_dir)
                    pdf_dw_Wr_li = public_url
                    pdf_dw_li = public_url
                else:
                    public_url = pdf_dw_Wr_li
                address = {
                    'url': str(url),
                    'pdf_name': file_name.filename,
                    'W_pdf_name': file_name.filename,
                    'W_pdf_local': wat_locatton,
                    'pdf_size': pdf_size,
                    'wt_pdf_size': wt_pdf_size,
                    'pdf_dir': pdf_dw_dir,
                    'wt_pdf_dir': pdf_dw_Wr_dir,
                    'pdf_dw_li': pdf_dw_li,
                    "pdf_dw_Wr_li": pdf_dw_Wr_li,
                    'public_url': public_url,
                    'proxy_worked': proxy,
                    'user_pass_worked': user_pass
                }
                return address

            # elif os.path.isfile(html):

            elif html[:4] != "%PDF" and html != []:

                print 'file is not in PDF format; do you want to save it as an html file?'
                print 'format is ' + html[:4]
                print '*************html is :***********\n\n'
                print html
                print '************* end of html :***********\n\n'
                print '\nfile link found is:\n' + link + '\nbut the file could not be downloaded'
            else:
                print 'file link found is:\n'
                #print str(link['link']);
                print 'but the file could not be downloaded'

        if done == 0:
            print 'we are unable to download from this address because we could not find a proper link'
            address = {
                'url': str(url),
                'pdf_dir': '',
                'pdf_size': '',
                'wt_pdf_size': '',
                'wt_pdf_dir': '',
                'pdf_dw_li': '',
                "pdf_dw_Wr_li": '',
                'public_url': '',
                'proxy_worked': '',
                'user_pass_worked': ''
            }
            return address
Example no. 52
    port_statistics[switch_port]['BytesReceived'] = nbytes_rx
    switch_port += 1

# Bytes Sent
tx_cells = soup.select('input[name="txpkt"]')
switch_port = 1
for cell in tx_cells:
    # Convert to bytes Hex to Decimal
    nbytes_tx = int(cell['value'].strip(), 16)
    port_statistics[switch_port]['BytesSent'] = nbytes_tx
    switch_port += 1

# CRC Error Packets
crc_cells = soup.select('input[name="crcPkt"]')
switch_port = 1
for cell in crc_cells:
    # Convert to bytes Hex to Decimal
    crc_packet = int(cell['value'].strip(), 16)
    port_statistics[switch_port]['CRCErrorPackets'] = crc_packet
    switch_port += 1

# Output in a human readable format
for port_id, stats in port_statistics.items():
    all_zero = all(v == 0 for v in stats.values())
    if not all_zero:
        nice_rx = size(stats["BytesReceived"])
        nice_tx = size(stats["BytesSent"])
        print(
            f'''Port {port_id}: [RX: {nice_rx}] [TX: {nice_tx}] [CRC: {stats["CRCErrorPackets"]}]'''
        )
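The switch-statistics snippet above stores each counter as a hexadecimal string and converts it with int(text, 16). A tiny illustration with made-up values:

# Hex counter strings as they might come out of the scraped form inputs (illustrative values).
raw = ['0000A410', '1F4', '0']
print([int(v.strip(), 16) for v in raw])  # [42000, 500, 0]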
Example no. 53
print
print "Sending..." if not args.dry_run else "Stats:"
srv = Client(server=args.radius_server,
             secret=args.radius_secret,
             dict=Dictionary(sys.path[0] + "/dictionary"))

if args.exclude_pattern:
    print "Exclusion check has been enabled."
    exclude_pattern = re.compile(args.exclude_pattern)

failed_usernames = []
for username, total_bytes in sum_bytes.iteritems():
    sys.stdout.write('  ' + username + ' ')

    try:
        sys.stdout.write(size(total_bytes))
    except NameError:
        sys.stdout.write(str(total_bytes))

    if args.dry_run:
        sys.stdout.write("\n")
        continue

    if args.exclude_pattern and exclude_pattern.search(username):
        sys.stdout.write("...skipped!\n")
        sys.stdout.flush()
        continue

    session_id = str(time.time())

    try:
Example no. 54
def size(self):
    if self.type == 'folder':
        return None
    else:
        return size(float(self.s3Key.size), system=alternative)
Example no. 55
def action(twitter_username,
           word_type='all',
           regex_place='suffix',
           regex=None,
           mask=None,
           filename=None,
           urlfile=None):
    final_wordlist = []
    year_list = []
    city_list = []
    noun_phrases_display = []
    proper_nouns_display = []
    text = ''
    if twitter_username != "none":
        try:
            print "Downloading tweets from: " + twitter_username
            text = get_all_tweets(twitter_username)
        except:
            print "Couldn't download tweets. Your credentials maybe wrong or you rate limited."
            sys.exit(1)
        print "Analyzing tweets, this will take a while.."
        print ""
    else:
        if filename is None and urlfile is None:
            print "No input source is specified"
            sys.exit(0)
        elif filename is not None and urlfile is None:
            print "Analyzing the text file.."
            print ""
            with open(filename, 'r') as textfile:
                text = textfile.read().replace('\n', '')
        elif filename is None and urlfile is not None:
            print "Analyzing the given URLs.."
            print ""
            with open(urlfile, 'r') as textfile:
                urls = textfile.read().splitlines()
                for url in urls:
                    print "Connecting to: " + url
                    try:
                        r = requests.get(url, timeout=20)
                    except:
                        print "Connection failed"
                        continue
                    soup = BeautifulSoup(r.text, 'html.parser')
                    for script in soup(["script", "style"]):
                        script.decompose()
                    clean_text = soup.get_text()
                    lines = (line.strip() for line in clean_text.splitlines())
                    chunks = (phrase.strip() for line in lines
                              for phrase in line.split("  "))
                    clean_text = '\n'.join(chunk for chunk in chunks if chunk)
                    text += clean_text
    text = ''.join(i for i in text if ord(i) < 128)
    noun_phrases = find_noun_phrases(text)
    proper_nouns = find_proper_nouns(text)
    paired_nouns = mass_similarity_compare(noun_phrases)
    paired_propers = mass_similarity_compare(proper_nouns)
    for i in range(len(noun_phrases)):
        noun_phrases_display.append(
            str(noun_phrases[i][0]) + ":" + str(noun_phrases[i][1]))

    print(Fore.GREEN + 'Most used nouns: ' + Style.RESET_ALL +
          ", ".join(noun_phrases_display))
    for i in range(len(proper_nouns)):
        proper_nouns_display.append(
            str(proper_nouns[i][0]) + ":" + str(proper_nouns[i][1]))
    print(Fore.GREEN + 'Most used proper nouns: ' + Style.RESET_ALL +
          ", ".join(proper_nouns_display))
    print ""
    print "Gathering related locations and years.."
    print ""
    for noun in proper_nouns:
        print "Getting info for: " + str(noun)
        try:
            temp_city_list = get_cities(noun[0])[0:3]
        except:
            continue
        for city in temp_city_list:
            city_list.append(city.replace(" ", "").lower())
    if city_list:
        city_list = list(set(city_list))
        for city in city_list:
            city_tuple = (city, 0)
            proper_nouns.append(city_tuple)
    for word in noun_phrases:
        if mask is None:
            final_wordlist.append(word[0])
        else:
            final_wordlist.append(mask_parser(mask, word[0]))
    for word in proper_nouns:
        if mask is None:
            final_wordlist.append(word[0])
        else:
            final_wordlist.append(mask_parser(mask, word[0]))
        if word_type != 'base':
            try:
                year = get_year(word[0])
                if year != None:
                    if year not in year_list:
                        year_list.append(year)
            except:
                pass
    for word in paired_nouns:
        if mask is None:
            final_wordlist.append(word)
        else:
            final_wordlist.append(mask_parser(mask, word))
    for word in paired_propers:
        if mask is None:
            final_wordlist.append(word)
        else:
            final_wordlist.append(mask_parser(mask, word))
    if regex_place is not None or regex is not None:
        new_items = regex_parser(regex)
        for item in new_items:
            for word in final_wordlist:
                with open("regex_words.txt", "a+") as regex_words:
                    if regex_place == 'prefix':
                        regex_words.write(item + word + '\n')
                    else:
                        regex_words.write(word + item + '\n')
    with open(twitter_username + "_wordlist.txt", 'w+') as wordlist:
        for word in final_wordlist:
            wordlist.write(word + '\n')
        if word_type != 'base':
            for word in final_wordlist:
                for year in list(set(year_list)):
                    wordlist.write(word + year + '\n')
    if regex is not None:
        os.system('cat regex_words.txt >> ' + twitter_username +
                  "_wordlist.txt")
        os.remove('regex_words.txt')
    raster_size = os.path.getsize(twitter_username + "_wordlist.txt")
    print "Wordlist is written to: " + twitter_username + "_wordlist.txt"
    print "Size of the wordlist: " + size(raster_size, system=alternative)
    print "Number of lines in wordlist: " + str(
        file_len(twitter_username + "_wordlist.txt"))
Example no. 56
    def do_list(self, args):
        try:
            # call UForge API
            # get images
            printer.out("Getting all images and publications for [" +
                        self.login + "] ...")
            images = self.api.Users(self.login).Images.Getall()
            images = images.images.image
            # get publications
            pimages = self.api.Users(self.login).Pimages.Getall()
            pimages = pimages.publishImages.publishImage
            if images is None or len(images) == 0:
                printer.out("No images available")
            else:
                printer.out("Images:")
                table = Texttable(800)
                table.set_cols_dtype(
                    ["t", "t", "t", "t", "t", "t", "t", "c", "t"])
                table.header([
                    "Id", "Name", "Version", "Rev.", "Format", "Created",
                    "Size", "Compressed", "Generation Status"
                ])
                images = generics_utils.order_list_object_by(images, "name")
                for image in images:
                    imgStatus = self.get_image_status(image.status)
                    table.add_row([
                        image.dbId, image.name, image.version, image.revision,
                        image.targetFormat.name,
                        image.created.strftime("%Y-%m-%d %H:%M:%S"),
                        size(image.fileSize), "X" if image.compress else "",
                        imgStatus
                    ])
                print table.draw() + "\n"
                printer.out("Found " + str(len(images)) + " images")

            if pimages is None or len(pimages) == 0:
                printer.out("No publication available")
            else:
                printer.out("Publications:")
                table = Texttable(800)
                table.set_cols_dtype(["t", "t", "t", "t", "t", "t"])
                table.header([
                    "Template name", "Image ID", "Account name", "Format",
                    "Cloud ID", "Status"
                ])
                pimages = generics_utils.order_list_object_by(pimages, "name")
                for pimage in pimages:
                    pubStatus = self.get_publish_status(pimage.status)
                    table.add_row([
                        pimage.name,
                        generics_utils.extract_id(pimage.imageUri),
                        pimage.credAccount.name
                        if pimage.credAccount is not None else "-",
                        pimage.credAccount.targetPlatform.name,
                        pimage.cloudId if pimage.cloudId is not None else "-",
                        pubStatus
                    ])
                print table.draw() + "\n"
                printer.out("Found " + str(len(pimages)) + " publications")

            return 0
        except ArgumentParserError as e:
            printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
            self.help_list()
        except Exception as e:
            return handle_uforge_exception(e)
Example no. 57
conn.commit()

# cur.execute('SELECT * FROM ways_nodes')
# all_rows = cur.fetchall()
# print('5):')
# pprint(all_rows)
conn.close()

##find file size

dirpath = '/Users/wenjia.ma/Udacity/data-wrangling/openMap/csv'

files_list = []
for path, dirs, files in os.walk(dirpath):
    files_list.extend([(filename,
                        size(os.path.getsize(os.path.join(path, filename))))
                       for filename in files])
files_list.sort(
    key=lambda letter: ''.join([i for i in letter[1] if not i.isdigit()]),
    reverse=True)
files_list.sort(key=lambda size: int(size[1].translate(None, "MKB")),
                reverse=True)

for filename, size in files_list:
    print '{:.<40s}: {:5s}'.format(filename, size)

##find number of nodes:
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()

query = '''SELECT COUNT(*) as count FROM nodes '''
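A note on the file-size listing above: it sorts the already-formatted strings by stripping the unit letters back out, which is fragile (it only knows about the letters 'M', 'K' and 'B', and the final loop shadows the size function). A sketch of a simpler variant, reusing the dirpath defined in the example, that sorts on the raw byte counts and formats only when printing:

import os
from hurry.filesize import size

files_list = []
for path, dirs, files in os.walk(dirpath):
    for filename in files:
        nbytes = os.path.getsize(os.path.join(path, filename))
        files_list.append((filename, nbytes))

# Sort on the raw integer, largest first; format only for display.
files_list.sort(key=lambda item: item[1], reverse=True)
for filename, nbytes in files_list:
    print('{:.<40s}: {:5s}'.format(filename, size(nbytes)))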
Example no. 58
    def print_all_images():
        from tools.objects import image_object
        from hurry.filesize import size

        images = []

        api = query_all_servers(head="GET", url=images_class.images_url)
        for host in api:
            for image in host:
                images.append(image_object(
                    Containers=image['Containers'],
                    Created=image['Created'],
                    Id=image['Id'],
                    Labels=image['Labels'],
                    ParentId=image['ParentId'],
                    RepoDigests=image['RepoDigests'][0],
                    RepoTags=image['RepoTags'][0],
                    SharedSize=image['SharedSize'],
                    Size=size(image['Size']),
                    VirtualSize=size(image['VirtualSize'])))
        return images
Example no. 59
# In[2]:

#df = df.sample(frac=.05)
#df.shape

# In[ ]:

importlib.reload(dataDecode)
decoder = dataDecode.decoder(base_path + '_ref/', debug=True)

pdf = df.progress_apply(decoder.decode___row, axis=1)

# In[ ]:

base_name = 'defunciones-deis-1998-2016-parsed-{}'.format(
    size(len(df)).replace('B', ''))

# In[ ]:

pdf.to_csv(save_path + base_name + '.csv', index=False)

# too much memory
#pdf.to_excel(save_path + base_name + '.xlsx', index=False)

# In[ ]:

# Dtypes (you still need to edit this manually after)
dtypes = {}

for key in pdf.dtypes.to_dict().keys():
    dtypes[key] = str(pdf.dtypes[key])
Example no. 60
async def query_mgs(client: Bot, message: Message):
    if re.findall("((^\/|^,|^!|^\.|^[\U0001F600-\U000E007F]).*)",
                  message.text):
        return
    query_message = message.text
    info = await client.get_me()
    user_message.clear()
    if len(message.text) > 2:
        try:
            for channel in Config.CHANNELS:
                # Looking for Document type in messages
                async for messages in client.USER.search_messages(
                        channel, query_message, filter="document"):
                    doc_file_names = messages.document.file_name
                    file_size = size(messages.document.file_size)
                    if re.search(rf'\b{query_message}\b', doc_file_names,
                                 re.IGNORECASE):
                        try:
                            await client.send_chat_action(
                                chat_id=message.from_user.id,
                                action="upload_document")
                        except Exception:
                            query_bytes = query_message.encode("ascii")
                            base64_bytes = b64encode(query_bytes)
                            secret_query = base64_bytes.decode("ascii")
                            await client.send_message(
                                chat_id=message.chat.id,
                                text=Presets.ASK_PM_TEXT,
                                reply_to_message_id=message.message_id,
                                reply_markup=InlineKeyboardMarkup([[
                                    InlineKeyboardButton(
                                        "👉 START BOT 👈",
                                        url="t.me/{}?start={}".format(
                                            info.username, secret_query))
                                ]]))
                            return
                        media_name = messages.document.file_name.rsplit(
                            '.', 1)[0]
                        media_format = messages.document.file_name.split(
                            '.')[-1]
                        try:
                            await client.copy_message(
                                chat_id=message.from_user.id,
                                from_chat_id=messages.chat.id,
                                message_id=messages.message_id,
                                caption=Config.GROUP_U_NAME +
                                Presets.CAPTION_TEXT_DOC.format(
                                    media_name, media_format, file_size))
                        except FloodWait as e:
                            time.sleep(e.x)
                        user_message[id] = message.message_id  # keyed on the built-in id, so this acts as one shared slot
                # Looking for video type in messages
                async for messages in client.USER.search_messages(
                        channel, query_message, filter="video"):
                    vid_file_names = messages.caption
                    file_size = size(messages.video.file_size)
                    if re.search(rf'\b{query_message}\b', vid_file_names,
                                 re.IGNORECASE):
                        try:
                            await client.send_chat_action(
                                chat_id=message.from_user.id,
                                action="upload_video")
                        except Exception:
                            query_bytes = query_message.encode("ascii")
                            base64_bytes = b64encode(query_bytes)
                            secret_query = base64_bytes.decode("ascii")
                            await client.send_message(
                                chat_id=message.chat.id,
                                text=Presets.ASK_PM_TEXT,
                                reply_to_message_id=message.message_id,
                                reply_markup=InlineKeyboardMarkup([[
                                    InlineKeyboardButton(
                                        "👉 START BOT 👈",
                                        url="t.me/{}?start={}".format(
                                            info.username, secret_query))
                                ]]))
                            return
                        media_name = message.text.upper()
                        try:
                            await client.copy_message(
                                chat_id=message.from_user.id,
                                from_chat_id=messages.chat.id,
                                message_id=messages.message_id,
                                caption=Config.GROUP_U_NAME +
                                Presets.CAPTION_TEXT_VID.format(
                                    media_name, file_size))
                        except FloodWait as e:
                            time.sleep(e.x)
                        user_message[id] = message.message_id
        except Exception:
            try:
                await client.send_message(
                    chat_id=message.chat.id,
                    text=Presets.PM_ERROR,
                    reply_to_message_id=message.message_id,
                    reply_markup=InlineKeyboardMarkup([[
                        InlineKeyboardButton("👉 START BOT 👈",
                                             url="t.me/{}".format(
                                                 info.username))
                    ]]))
            except Exception:
                pass
            return
        if user_message.keys():
            try:
                await client.send_message(
                    chat_id=message.chat.id,
                    text=Presets.MEDIA_SEND_TEXT,
                    reply_to_message_id=user_message[id],
                    reply_markup=InlineKeyboardMarkup([[
                        InlineKeyboardButton("👉 Click Here To View 👈",
                                             url="t.me/{}".format(
                                                 info.username))
                    ]]))
                user_message.clear()
            except Exception:
                pass
        else:
            try:
                a = await client.send_message(
                    chat_id=message.chat.id,
                    text=Presets.NO_MEDIA.format(query_message),
                    reply_to_message_id=message.message_id,
                )
                time.sleep(30)  # blocking sleep inside an async handler; await asyncio.sleep(30) would avoid stalling the event loop
                await a.delete()
            except Exception:
                pass