def stream(self, stream, chunk_size=DEFAULT_CHUNK_SIZE, request=None):
    """
    Stream content back to the client ``chunk_size`` bytes at a time.

    if a controller function::

        return response.stream(file, 100)

    the file content will be streamed at 100 bytes at the time
    """
    if isinstance(stream, (str, unicode)):
        # A path was passed instead of an open file: delegate to the
        # helper that honours conditional (304) / partial (206) requests.
        stream_file_or_304_or_206(stream, chunk_size=chunk_size,
                                  request=request, headers=self.headers)

    # ## the following is for backward compatibility
    filename = getattr(stream, "name", None)
    present = set(header.lower() for header in self.headers)
    if filename:
        if "content-type" not in present:
            self.headers["Content-Type"] = contenttype(filename)
        if "content-length" not in present:
            try:
                self.headers["Content-Length"] = os.path.getsize(filename)
            except OSError:
                # .name may not point at a real file; omit the header
                pass

    if request and request.env.web2py_use_wsgi_file_wrapper:
        return request.env.wsgi_file_wrapper(stream, chunk_size)
    return streamer(stream, chunk_size=chunk_size)
def stream(
    self,
    stream,
    chunk_size=10 ** 6,
    request=None,
):
    """
    Stream content back to the client ``chunk_size`` bytes at a time.

    if a controller function::

        return response.stream(file, 100)

    the file content will be streamed at 100 bytes at the time

    :param stream: a filename (str) or an open file-like object
    :param chunk_size: number of bytes per chunk
    :param request: current request, used for 304/206 support when a
        filename is given
    :returns: the chunk generator, also stored in ``self.body``
    """
    if isinstance(stream, str):
        # A path was given: the helper implements conditional (304) and
        # partial-content (206) responses.
        stream_file_or_304_or_206(stream, request=request,
                                  chunk_size=chunk_size,
                                  headers=self.headers)

    # ## the following is for backward compatibility
    filename = getattr(stream, 'name', None)
    keys = [item.lower() for item in self.headers]
    if filename and 'content-type' not in keys:
        self.headers['Content-Type'] = contenttype(filename)
    if filename and 'content-length' not in keys:
        # BUG FIX: os.stat(filename)[stat.ST_SIZE] raised an uncaught
        # OSError when stream.name did not refer to a real file (e.g.
        # socket-like objects); now the header is simply omitted.
        try:
            self.headers['Content-Length'] = os.path.getsize(filename)
        except OSError:
            pass
    self.body = streamer(stream, chunk_size)
    return self.body
def stream(self, stream, chunk_size=DEFAULT_CHUNK_SIZE, request=None,
           attachment=False, filename=None):
    """
    Stream content back to the client ``chunk_size`` bytes at a time.

    if a controller function::

        return response.stream(file, 100)

    the file content will be streamed at 100 bytes at the time

    Optional kwargs: (for custom stream calls)
    attachment=True  # Send as attachment. Usually creates a
                     # pop-up download window on browsers
    filename=None    # The name for the attachment

    Note: for using the stream name (filename) with attachments
    the option must be explicitly set as function parameter (will
    default to the last request argument otherwise)
    """
    # snapshot of lower-cased header names set by the caller
    keys = [item.lower() for item in self.headers]
    if attachment:
        # NOTE(review): the attachment name is not quoted or escaped
        # here - confirm callers never pass names containing ';' or '"'.
        attname = "" if filename is None else filename
        self.headers["Content-Disposition"] = "attachment;filename=%s" % attname

    if not request:
        request = current.request
    if isinstance(stream, (str, unicode)):
        # a path was given: delegate, honouring 304/206 responses
        stream_file_or_304_or_206(stream, chunk_size=chunk_size,
                                  request=request, headers=self.headers)

    # ## the following is for backward compatibility
    if hasattr(stream, "name"):
        filename = stream.name
    if filename:
        if "content-type" not in keys:
            self.headers["Content-Type"] = contenttype(filename)
        if "content-length" not in keys:
            try:
                self.headers["Content-Length"] = os.path.getsize(filename)
            except OSError:
                pass

    # Internet Explorer < 9.0 will not allow downloads over SSL unless
    # caching is enabled, so relax the caching headers for it.
    agent = request.env.http_user_agent
    if (request.is_https
            and isinstance(agent, str)
            and not re.search(r"Opera", agent)
            and re.search(r"MSIE [5-8][^0-9]", agent)):
        self.headers["Pragma"] = "cache"
        self.headers["Cache-Control"] = "private"

    if request and request.env.web2py_use_wsgi_file_wrapper:
        return request.env.wsgi_file_wrapper(stream, chunk_size)
    return streamer(stream, chunk_size=chunk_size)
def stream(
    self,
    stream,
    chunk_size=10**6,
    request=None,
):
    """
    Stream content back to the client ``chunk_size`` bytes at a time.

    if a controller function::

        return response.stream(file, 100)

    the file content will be streamed at 100 bytes at the time

    :param stream: a filename (str) or an open file-like object
    :param chunk_size: number of bytes per chunk
    :param request: current request, used for 304/206 support when a
        filename is given
    :returns: the chunk generator, also stored in ``self.body``
    """
    if isinstance(stream, str):
        # A path was given: the helper implements conditional (304) and
        # partial-content (206) responses.
        stream_file_or_304_or_206(stream, request=request,
                                  chunk_size=chunk_size,
                                  headers=self.headers)

    # ## the following is for backward compatibility
    filename = getattr(stream, 'name', None)
    keys = [item.lower() for item in self.headers]
    if filename and 'content-type' not in keys:
        self.headers['Content-Type'] = contenttype(filename)
    if filename and 'content-length' not in keys:
        # BUG FIX: os.stat(filename)[stat.ST_SIZE] raised an uncaught
        # OSError when stream.name did not refer to a real file (e.g.
        # socket-like objects); now the header is simply omitted.
        try:
            self.headers['Content-Length'] = os.path.getsize(filename)
        except OSError:
            pass
    self.body = streamer(stream, chunk_size)
    return self.body
def stream(
    self,
    stream,
    chunk_size=DEFAULT_CHUNK_SIZE,
    request=None,
):
    """
    Stream content back to the client ``chunk_size`` bytes at a time.

    if a controller function::

        return response.stream(file, 100)

    the file content will be streamed at 100 bytes at the time
    """
    request = request or current.request
    if isinstance(stream, (str, unicode)):
        # a path was given: the helper honours 304 / 206 responses
        stream_file_or_304_or_206(stream, chunk_size=chunk_size,
                                  request=request, headers=self.headers)

    # ## the following is for backward compatibility
    filename = getattr(stream, 'name', None)
    seen = [header.lower() for header in self.headers]
    if filename:
        if 'content-type' not in seen:
            self.headers['Content-Type'] = contenttype(filename)
        if 'content-length' not in seen:
            try:
                self.headers['Content-Length'] = os.path.getsize(filename)
            except OSError:
                pass

    # Internet Explorer < 9.0 will not allow downloads over SSL unless
    # caching is enabled
    agent = request.env.http_user_agent
    if (request.is_https
            and isinstance(agent, str)
            and not re.search(r'Opera', agent)
            and re.search(r'MSIE [5-8][^0-9]', agent)):
        self.headers['Pragma'] = 'cache'
        self.headers['Cache-Control'] = 'private'

    if request and request.env.web2py_use_wsgi_file_wrapper:
        return request.env.wsgi_file_wrapper(stream, chunk_size)
    return streamer(stream, chunk_size=chunk_size)
def stream(
    self,
    stream,
    chunk_size=DEFAULT_CHUNK_SIZE,
    request=None,
):
    """
    Stream content back to the client ``chunk_size`` bytes at a time.

    if a controller function::

        return response.stream(file, 100)

    the file content will be streamed at 100 bytes at the time
    """
    if not request:
        request = current.request
    # A string argument is treated as a file path: delegate to the helper
    # that implements conditional (304) and ranged (206) responses.
    if isinstance(stream, (str, unicode)):
        stream_file_or_304_or_206(stream, chunk_size=chunk_size,
                                  request=request, headers=self.headers)
    # ## the following is for backward compatibility
    if hasattr(stream, 'name'):
        filename = stream.name
    else:
        filename = None
    # lower-cased names of headers already set by the caller, so
    # explicit headers are never overwritten below
    keys = [item.lower() for item in self.headers]
    if filename and not 'content-type' in keys:
        self.headers['Content-Type'] = contenttype(filename)
    if filename and not 'content-length' in keys:
        try:
            self.headers['Content-Length'] = \
                os.path.getsize(filename)
        except OSError:
            # stream.name may not be a real path; omit the header
            pass
    # Internet Explorer < 9.0 will not allow downloads over SSL unless caching is enabled
    if request.is_https and isinstance(request.env.http_user_agent, str) and \
            not re.search(r'Opera', request.env.http_user_agent) and \
            re.search(r'MSIE [5-8][^0-9]', request.env.http_user_agent):
        self.headers['Pragma'] = 'cache'
        self.headers['Cache-Control'] = 'private'
    # hand chunking to the WSGI server's file wrapper when available,
    # otherwise fall back to the pure-Python generator
    if request and request.env.web2py_use_wsgi_file_wrapper:
        wrapped = request.env.wsgi_file_wrapper(stream, chunk_size)
    else:
        wrapped = streamer(stream, chunk_size=chunk_size)
    return wrapped
def classifyTwitterAcc():
    """
    Classify a Twitter account's recent tweets as hate / non-hate speech.

    Reads a JSON body with ``twitterId`` and ``numTweets``, fetches that
    many tweets, classifies each one, and returns a JSON payload with the
    overall verdict (``pred``), the hate percentage (``prob``) and a
    confidence histogram (``labels``/``data``), plus HTTP status 200.
    """
    # renamed from ``data`` to avoid shadowing the histogram list below
    payload = request.get_json()
    tweets = streamer(payload["twitterId"], payload["numTweets"]).readUserAccount()
    predictions, probability = model.classify(tweets)

    hate = 0
    nonHate = 0
    labels = ["<50%", "50-60%", "60-70%", "70-80%", ">80%"]
    # histogram buckets: [non-hate, 50-60%, 60-70%, 70-80%, >80%]
    data = [0, 0, 0, 0, 0]
    for pred, prob in zip(predictions, probability):
        if pred == 1:
            hate += 1
            p = prob[1]  # confidence of the "hate" class
            if p < 0.60:
                data[1] += 1
            elif p < 0.70:
                data[2] += 1
            elif p < 0.80:
                data[3] += 1
            else:
                data[4] += 1
        else:
            nonHate += 1
            data[0] += 1

    total = hate + nonHate
    # BUG FIX: previously divided by (hate + nonHate) unconditionally and
    # crashed with ZeroDivisionError for accounts with no classified tweets.
    percent = (hate / total) * 100 if total else 0
    out = "Yes" if percent > 25 else "No"
    output = {
        "pred": out,
        "prob": str(percent) + "%",
        "labels": labels,
        "data": data
    }
    return json.dumps(output), 200
def stream(
    self,
    stream,
    chunk_size=DEFAULT_CHUNK_SIZE,
    request=None,
):
    """
    Stream content back to the client ``chunk_size`` bytes at a time.

    if a controller function::

        return response.stream(file, 100)

    the file content will be streamed at 100 bytes at the time
    """
    if isinstance(stream, (str, unicode)):
        # filename given: delegate, honouring 304/206 responses
        stream_file_or_304_or_206(stream, chunk_size=chunk_size,
                                  request=request, headers=self.headers)

    # ## the following is for backward compatibility
    filename = stream.name if hasattr(stream, 'name') else None
    lowered = [name.lower() for name in self.headers]
    if filename:
        if 'content-type' not in lowered:
            self.headers['Content-Type'] = contenttype(filename)
        if 'content-length' not in lowered:
            try:
                self.headers['Content-Length'] = os.path.getsize(filename)
            except OSError:
                # not a real file on disk; skip the header
                pass

    if request and request.env.web2py_use_wsgi_file_wrapper:
        return request.env.wsgi_file_wrapper(stream, chunk_size)
    else:
        return streamer(stream, chunk_size=chunk_size)
def stream(self, stream, chunk_size=DEFAULT_CHUNK_SIZE, request=None,
           attachment=False, filename=None):
    """
    if a controller function::

        return response.stream(file, 100)

    the file content will be streamed at 100 bytes at the time

    Optional kwargs: (for custom stream calls)
    attachment=True  # Send as attachment. Usually creates a
                     # pop-up download window on browsers
    filename=None    # The name for the attachment

    Note: for using the stream name (filename) with attachments
    the option must be explicitly set as function parameter(will
    default to the last request argument otherwise)
    """
    headers = self.headers
    # for attachment settings and backward compatibility
    # (snapshot of lower-cased header names taken BEFORE
    # Content-Disposition is added below)
    keys = [item.lower() for item in headers]
    if attachment:
        if filename is None:
            attname = ""
        else:
            attname = filename
        headers["Content-Disposition"] = \
            "attachment;filename=%s" % attname
    if not request:
        request = current.request
    # A string argument is treated as a file path: delegate to the helper
    # that implements conditional (304) and ranged (206) responses; note
    # that self.status is forwarded here, unlike in older variants.
    if isinstance(stream, (str, unicode)):
        stream_file_or_304_or_206(stream, chunk_size=chunk_size,
                                  request=request, headers=headers,
                                  status=self.status)
    # ## the following is for backward compatibility
    if hasattr(stream, 'name'):
        filename = stream.name
    if filename and not 'content-type' in keys:
        headers['Content-Type'] = contenttype(filename)
    if filename and not 'content-length' in keys:
        try:
            headers['Content-Length'] = \
                os.path.getsize(filename)
        except OSError:
            # stream.name may not be a real path; omit the header
            pass
    env = request.env
    # Internet Explorer < 9.0 will not allow downloads over SSL unless caching is enabled
    if request.is_https and isinstance(env.http_user_agent, str) and \
            not re.search(r'Opera', env.http_user_agent) and \
            re.search(r'MSIE [5-8][^0-9]', env.http_user_agent):
        headers['Pragma'] = 'cache'
        headers['Cache-Control'] = 'private'
    # hand chunking to the WSGI server's file wrapper when available,
    # otherwise fall back to the pure-Python generator
    if request and env.web2py_use_wsgi_file_wrapper:
        wrapped = env.wsgi_file_wrapper(stream, chunk_size)
    else:
        wrapped = streamer(stream, chunk_size=chunk_size)
    return wrapped
import streamer as streamerbsp
import listener as listenerbsp
import time
import pandas as pd

# BUG FIX: ``root`` was only present as a commented-out line, so the
# streamer constructor below crashed with a NameError.  Restore the
# definition (adjust the path to the local data file as needed).
root = "/Users/larshelin/Documents/Studium/Master/Semester 3/Seminar/Data/oneweekfrom20130107.csv"

ip = "127.0.0.1"
speed = 2
# Index of 7.1. 8 AM (first event of the replay window)
startIndex = 56975
events = 10000

# lt_1 = time.strftime("%d.%m.%Y %H:%M:%S")
# print lt_1
# lt_1 = pd.to_datetime(lt_1)

# replay ``events`` events starting at ``startIndex`` at the given speed
# listener_ref = listenerbsp.listener(ip, 5005)
streamer_ref = streamerbsp.streamer(ip, 5005, root)
streamer_ref.offset(startIndex, events)
streamer_ref.start(speed)
# listener_ref.start

# lt_2 = time.strftime("%d.%m.%Y %H:%M:%S")
# print lt_2
# lt_2 = pd.to_datetime(lt_2)
# diff = (lt_2 - lt_1).seconds
# print 'Streamer Arbeitszeit = ', diff, " Sekunden"
def main(input_filename, overwrite=False, append=False):
    """
    Post-process spike-sorting output into per-recording .h5 files.

    :param input_filename: KWIK or .prm file.
    :param overwrite: overwrite destination file if it exists.
    :param append: append changed clusters to existing file.
    :return:
    """
    ex = os.path.splitext(input_filename)[1]
    input_dir = os.path.split(input_filename)[0]
    # --overwrite and --append are mutually exclusive
    if overwrite and append:
        raise ValueError(
            u'Post sorting can not be run with both --overwrite and --append flags'
        )
    if ex.lower() == u'.prm':
        prms = get_params(input_filename)
        # NOTE(review): if raw_data_files is NOT a dict, input_filebases is
        # never assigned and the loop below raises NameError - confirm
        # whether a non-dict (e.g. list) form should be supported here.
        if isinstance(prms[u'raw_data_files'], dict):
            input_filebases = [
                prms[u'experiment_name'] + u'_rec_' + x
                for x in prms[u'raw_data_files']
            ]
            # NOTE(review): input_kwiks is computed but never used in the
            # code visible here.
            input_kwiks = [
                os.path.join(input_dir, x) + u'.kwik' for x in input_filebases
            ]
    elif ex.lower() == u'.kwik':
        #only a single kwik to deal with.
        # input_kwiks = [input_filename]
        t = os.path.split(input_filename)[1]
        input_filebases = [os.path.splitext(t)[0]]
    else:
        raise ValueError(
            u'Input filename was an unknown type: must be .kwik or .prm file.')
    # check that input files exist in the directory that we expect.
    filenames = []
    for fb in input_filebases:
        fbs = {}
        logging.debug(u'Checking for files with filebase {0:s}'.format(fb))
        for ext in (u'.raw.kwd', u'.kwik'):
            file = os.path.join(input_dir, fb) + ext
            fbs[ext] = file
            if not os.path.exists(file):
                raise FileException(u'File does not exist: {0:s}'.format(file))
            else:
                logging.debug(u'File {0:s} exists!'.format(file))
        fbs[u'destination'] = os.path.join(input_dir, fb) + u'.h5'
        logging.info(u'All {0:s} input files present.'.format(fb))
        filenames.append(fbs)
    # check that files don't exist or that we're either appending or overwriting them. Do this first, so that we can
    # throw an exemption before we waste time processing any of the files.
    # NOTE(review): the function appears to continue below this point in
    # the original file (destination-file checks and processing).
# NOTE(review): this fragment looks like the continuation of main();
# ``destination_filenames``, ``filenames``, ``overwrite`` and ``append``
# are not defined at this scope.
for file in destination_filenames:
    # NOTE(review): ``destination_filenames`` is never defined in the
    # visible code and ``file`` is immediately rebound by the inner loop;
    # this outer loop looks like a merge leftover - confirm and remove.
    for files in filenames:
        file = files[u'destination']
        if os.path.exists(file):
            logging.debug(u'Destination file exists: %s' % file)
            if not overwrite and not append:
                logging.error(
                    u'Destination file exists: {0:s} no overwrite or append parameters were passed aborting.'
                    .format(file))
                if len(filenames) > 1:
                    raise FileException(
                        u'Destination file %s already exists. Please use --overwrite or --append to '
                        u'reprocess or update file.')
# if things are ok, put this thing together.
for files in filenames:
    file = files[u'destination']
    if os.path.exists(file) and append:
        # append mode: only refresh cluster data when the source kwik is
        # newer than what the destination already contains
        with tb.open_file(file, 'a') as dest_file:
            d_mod = dest_file.get_node_attr(u'/clusters', u'kwik_mod_time')
            if d_mod >= os.path.getmtime(files['.kwik']):
                logging.info(
                    u'Existing file contains most recent kwik data, skipping..'
                    .format(file))
            else:
                logging.info(u'Updating kwik data for {0:s}'.format(file))
                clusterer(files['.kwik'], dest_file)
    elif (os.path.exists(file) and overwrite) or not os.path.exists(file):
        # fresh build: clusters, events and raw streams all regenerated
        logging.info(u'Creating destination file: {0}'.format(file))
        with tb.open_file(file, 'w') as dest_file:
            clusterer(files['.kwik'], dest_file)
            eventer(files['.raw.kwd'], dest_file)
            streamer(files['.raw.kwd'], dest_file)
    else:
        raise Exception(
            u'File already exists and no overwrite parameter was provided:'
            u'\n\tFile: {0}'
            u'\n\tfile exists = {1}'
            u'\n\tappend = {2}'
            u'\n\toverwrite = {3}.'.format(file, os.path.exists(file),
                                          append, overwrite))
def main(input_filename, overwrite=False, append=False):
    """
    Post-process spike-sorting output into per-recording .h5 files.

    :param input_filename: KWIK or .prm file.
    :param overwrite: overwrite destination file if it exists.
    :param append: append changed clusters to existing file.
    :return:
    """
    ex = os.path.splitext(input_filename)[1]
    input_dir = os.path.split(input_filename)[0]
    # --overwrite and --append are mutually exclusive
    if overwrite and append:
        raise ValueError(u'Post sorting can not be run with both --overwrite and --append flags')
    if ex.lower() == u'.prm':
        prms = get_params(input_filename)
        # NOTE(review): if raw_data_files is NOT a dict, input_filebases is
        # never assigned and the loop below raises NameError - confirm
        # whether a non-dict (e.g. list) form should be supported here.
        if isinstance(prms[u'raw_data_files'], dict):
            input_filebases = [prms[u'experiment_name'] + u'_rec_' + x for x in prms[u'raw_data_files']]
            # NOTE(review): input_kwiks is computed but never used in the
            # code visible here.
            input_kwiks = [os.path.join(input_dir, x) + u'.kwik' for x in input_filebases]
    elif ex.lower() == u'.kwik':
        #only a single kwik to deal with.
        # input_kwiks = [input_filename]
        t = os.path.split(input_filename)[1]
        input_filebases = [os.path.splitext(t)[0]]
    else:
        raise ValueError(u'Input filename was an unknown type: must be .kwik or .prm file.')
    # check that input files exist in the directory that we expect.
    filenames = []
    for fb in input_filebases:
        fbs = {}
        logging.debug(u'Checking for files with filebase {0:s}'.format(fb))
        for ext in (u'.raw.kwd', u'.kwik'):
            file = os.path.join(input_dir, fb) + ext
            fbs[ext] = file
            if not os.path.exists(file):
                raise FileException(u'File does not exist: {0:s}'.format(file))
            else:
                logging.debug(u'File {0:s} exists!'.format(file))
        fbs[u'destination'] = os.path.join(input_dir, fb) + u'.h5'
        logging.info(u'All {0:s} input files present.'.format(fb))
        filenames.append(fbs)
    # check that files don't exist or that we're either appending or overwriting them. Do this first, so that we can
    # throw an exemption before we waste time processing any of the files.
    # NOTE(review): the function appears to continue below this point in
    # the original file (destination-file checks and processing).
# NOTE(review): this fragment looks like the continuation of main();
# ``destination_filenames``, ``filenames``, ``overwrite`` and ``append``
# are not defined at this scope.
for file in destination_filenames:
    # NOTE(review): ``destination_filenames`` is never defined in the
    # visible code and ``file`` is immediately rebound by the inner loop;
    # this outer loop looks like a merge leftover - confirm and remove.
    for files in filenames:
        file = files[u'destination']
        if os.path.exists(file):
            logging.debug(u'Destination file exists: %s' % file)
            if not overwrite and not append:
                logging.error(
                    u'Destination file exists: {0:s} no overwrite or append parameters were passed aborting.'.format(
                        file))
                if len(filenames) > 1:
                    raise FileException(u'Destination file %s already exists. Please use --overwrite or --append to '
                                        u'reprocess or update file.')
# if things are ok, put this thing together.
for files in filenames:
    file = files[u'destination']
    if os.path.exists(file) and append:
        # append mode: only refresh cluster data when the source kwik is
        # newer than what the destination already contains
        with tb.open_file(file, 'a') as dest_file:
            d_mod = dest_file.get_node_attr(u'/clusters', u'kwik_mod_time')
            if d_mod >= os.path.getmtime(files['.kwik']):
                logging.info(u'Existing file contains most recent kwik data, skipping..'.format(file))
            else:
                logging.info(u'Updating kwik data for {0:s}'.format(file))
                clusterer(files['.kwik'], dest_file)
    elif (os.path.exists(file) and overwrite) or not os.path.exists(file):
        # fresh build: clusters, events and raw streams all regenerated
        logging.info(u'Creating destination file: {0}'.format(file))
        with tb.open_file(file, 'w') as dest_file:
            clusterer(files['.kwik'], dest_file)
            eventer(files['.raw.kwd'], dest_file)
            streamer(files['.raw.kwd'], dest_file)
    else:
        raise Exception(u'File already exists and no overwrite parameter was provided:'
                        u'\n\tFile: {0}'
                        u'\n\tfile exists = {1}'
                        u'\n\tappend = {2}'
                        u'\n\toverwrite = {3}.'.format(file, os.path.exists(file), append, overwrite))
from time import sleep from streamer import streamer def _packet_callback(p): print '%s -> %s' % (p.get('source'), p.get('word')) return True stream = streamer(_packet_callback) while True: sleep(1)