def _do_one_output(self, output):
    """Render one view output: to a CSV file if -f was given, else to stdout.

    The legend is retrieved alongside the iterable data because both sinks
    (write_csv and print_data) need it to decode the rows.
    """
    legend = output.get_legend()
    rows = output.get_iterdata()
    destination = self.options.fname
    if destination is None:
        # No -f option: dump the rows to the screen.
        print_data(legend, rows)
    else:
        # -f was used: persist the rows as CSV.
        write_csv(destination, legend, rows)
def main(self):
    """Create a per-source-IP bytes view on a remote file and save it as CSV."""
    # Fetch a handle to the remote capture file named on the command line.
    source = self.shark.get_file(self.options.file)

    # Column list for the view. Views resemble SQL tables; the single Key
    # column (source IP) is what the data gets grouped by, and the Value
    # column is the byte count accumulated for each group.
    # Column names support auto-completion: break below and type
    # "sk.columns." in any interactive Python shell or IDE debugger
    # (e.g. eclipse, bpython, dreampie...) to explore them.
    view_columns = [
        Key(self.shark.columns.ip.src),
        Value(self.shark.columns.generic.bytes),
    ]

    # Apply the view to the source file.
    view = self.shark.create_view(source, view_columns)

    # aggregated=True collapses the whole capture into one big sample,
    # suitable for a barchart. The default (False) would instead return
    # one sample per second, ready for a stripchart.
    aggregated_output = view.get_data(aggregated=True)

    # Persist the data. The legend is required to decode the columns,
    # so it is written next to the data itself.
    write_csv(CSV_FILE_NAME, view.get_legend(), aggregated_output)
    print("View data written to file " + CSV_FILE_NAME)

    # All done: release the view on the appliance.
    view.close()
def main(app):
    """Run four NetShark view examples against a remote capture file.

    1. Asynchronous view creation with progress polling.
    2. A filtered view ("web top talkers" on port 80).
    3. Per-second data consumed through an iterator.
    4. Server-side sorting/slicing via get_data() keyword arguments.

    Fixes over the original example code: the stray debug line
    ``print source, columns`` before example 3 is removed, the progress
    test uses ``>=`` so an overshooting progress value cannot hang the
    poll loop, and several copy-pasted comments are corrected.
    """
    # Open the remote file
    source = app.shark.get_file('/admin/noon.cap')

    ##########################################################################
    # Applying a view asynchronously.
    # When the amount of packets processed by a view on a shark appliance is
    # big, it could take minutes for the view data to be generated.
    # This example shows how to use the sync parameter of create_view() to
    # create a view asynchronously and inform the user about the processing
    # progress.
    ##########################################################################

    # Specify the column list
    columns = [
        Key(app.shark.columns.ip.src),
        Value(app.shark.columns.generic.bytes)
    ]

    # Create the view, making sure that sync is set to False
    v = app.shark.create_view(source, columns, sync=False)

    # Poll until processing completes. ">=" (rather than "==") guards
    # against a progress value that jumps past 100 between polls.
    while True:
        p = v.get_progress()
        print(p)
        if p >= 100:
            break
        time.sleep(.5)

    # Retrieve the view data.
    output = v.get_data(aggregated=True)

    # Print the data to the screen
    for sample in output:
        print(sample["t"].strftime("%H:%M:%S"))
        for vals in sample['vals']:
            print('\t{0}\t{1}'.format(vals[0], vals[1]))

    v.close()

    ##########################################################################
    # Using filters.
    # Compared to the example in create_view.py, here we add a port 80 filter
    # to the view to create a "web top talkers".
    ##########################################################################

    # Specify a Shark filter for port 80 packets. The SharkFilter class
    # implements Pilot filters, which means that you can paste any filter
    # from Pilot to the line below. Other valid filter types are
    # WiresharkDisplayFilter (which supports any Wireshark display filter),
    # BpfFilter (for Wireshark capture filters) and TimeFilter (for
    # time-based filtering).
    filters = [SharkFilter('tcp.port_pair="80"')]

    # Specify the column list
    columns = [
        Key(app.shark.columns.ip.src),
        Value(app.shark.columns.generic.bytes),
    ]

    # The list of filters is passed as a parameter to create_view()
    v = app.shark.create_view(source, columns, filters)

    # Retrieve the view data.
    output = v.get_data(aggregated=True)

    # Save the data to disk
    write_csv(CSV_FILE_NAME, v.get_legend(), output)

    v.close()

    ##########################################################################
    # Time-based data and manual output parsing.
    # This view shows the number of http requests over time, in a format
    # suitable to be represented on a stripchart.
    ##########################################################################

    # Specify the column list. This view has no key column and no filters.
    columns = [Value(app.shark.columns.http.answered_requests)]

    v = app.shark.create_view(source, columns)

    # Retrieve the view data.
    # View.get_iterdata() returns an iterator to the view output instead of
    # the full data. This is ideal when the data is very big, because it
    # makes it possible to process it while it's downloaded, saving memory
    # at the client side.
    # Note also that we don't specify aggregated=True. This means that we
    # will receive a sample for each second interval.
    output = v.get_iterdata()

    # Instead of using write_csv(), this time we manually parse the samples
    # in the output.
    # Each sample is a dictionary with the following remarkable fields:
    #  - t: the sample time as a datetime
    #  - vals: a list of tuples, containing the data for the sample
    # In this case, since our view doesn't have keys, each sample contains
    # exactly one value. The next example shows how to deal with multiple
    # values per sample.
    for sample in output:
        print('{0}\t{1}'.format(sample["t"], sample['vals'][0][0]))

    # close the view
    v.close()

    ##########################################################################
    # Advanced get_data() usage.
    # get_data() has some pretty powerful functionality for server-side
    # sorting and filtering of view output data.
    # This example splits the data in 1 minute intervals, and for each
    # interval it displays the top 3 talkers.
    ##########################################################################

    # Specify the column list
    columns = [
        Key(app.shark.columns.ip.src),
        Value(app.shark.columns.generic.bytes)
    ]

    v = app.shark.create_view(source, columns)

    # Retrieve the view data, sorted and sliced on the server side.
    output = v.get_data(delta=datetime.timedelta(minutes=1),  # 1 minute samples
                        sortby=1,              # Sort by column 1 (bytes)
                        sorttype='descending',  # Sort from biggest to smallest
                        fromentry=0,           # For each 1 min sample, the
                                               # first element to display is
                                               # the biggest one
                        toentry=2              # For each 1 min sample, the
                                               # last element to display is
                                               # the third biggest one
                        )

    # Proceed printing the data to the screen
    for sample in output:
        print(sample["t"])
        for vals in sample['vals']:
            print('\t{0}\t{1}'.format(vals[0], vals[1]))

    v.close()