def pa():
    """Prompt the user to choose an Alpha (or pre-classic) version.

    Prints every known version with its index, reads an index from stdin,
    and returns the chosen version string.  Returns None (after printing a
    hint) when the index is out of range.
    """
    alpha = [
        "a1.2.6", "a1.2.5", "a1.2.4_01", "a1.2.3_04", "a1.2.3_02",
        "a1.2.3_01", "a1.2.3", "a1.2.2b", "a1.2.2a", "a1.2.1_01",
        "a1.2.1", "a1.2.0_02", "a1.2.0_01", "a1.2.0", "a1.1.1_01",
        "a1.1.2", "a1.1.0", "a1.0.17_04", "a1.0.17_02", "a1.0.16",
        "a1.0.15", "a1.0.14", "a1.0.11", "a1.0.5_01", "a1.0.4",
        "inf-20100618", "c0.30_o1c", "c0.0.13a", "c0.0.13a_03", "c0.0.11",
        "rd-161348", "rd-160052", "rd-20090515", "rd-132328", "rd-132211"
    ]
    # BUG FIX: the original printed str(alpha) -- the entire list -- on every
    # row instead of the row's index (compare pr(), which prints the counter).
    # The dead "if counter == range(len(alpha)): break" check (an int compared
    # to a range object, always False) has been removed.
    for counter, name in enumerate(alpha):
        print("Alpha " + str(counter) + '\t\t' + name + '\t\t')
    a = int(input("Enter Version Number: "))
    if 0 <= a < len(alpha):
        version = alpha[a]
        log.main("Version selected: " + version)
        return version
    else:
        # BUG FIX: str() is required -- concatenating an int to a str
        # raised TypeError in the original.
        print("Please select a number between zero and " + str(len(alpha)))
def pr():
    """Prompt the user to choose a Release version.

    Prints every known version with its index, reads an index from stdin,
    and returns the chosen version string.  Returns None (after printing a
    hint) when the index is out of range.
    """
    release = [
        '1.14.1', '1.14', '1.13.2', '1.13.1', '1.13', '1.12.2', '1.12.1',
        # BUG FIX: was '1. 12' (stray space) in the original data.
        '1.12',
        '1.11.2', '1.11.1', '1.11', '1.10.2', '1.10.1', '1.10', '1.9.4',
        '1.9.3', '1.9.2', '1.9.1', '1.9', '1.8.9', '1.8.8', '1.8.7',
        '1.8.6', '1.8.5', '1.8.4', '1.8.3', '1.8.2', '1.8.1', '1.8',
        '1.7.10', '1.7.9', '1.7.8', '1.7.7', '1.7.6', '1.7.5', '1.7.4',
        '1.7.3', '1.7.2', '1.6.4', '1.6.2', '1.6.1', '1.5.2', '1.5.1',
        '1.4.7', '1.4.6', '1.4.5', '1.4.4', '1.4.2', '1.3.2', '1.3.1',
        '1.2.5', '1.2.4', '1.2.3', '1.2.2', '1.2.1', '1.1', '1.0'
    ]
    # The dead "if counter == range(len(release)): break" check (int vs
    # range object, always False) has been removed.
    for counter, name in enumerate(release):
        print("Release " + str(counter) + '\t\t' + name + '\t\t')
    a = int(input("Enter Version Number: "))
    if 0 <= a < len(release):
        version = release[a]
        log.main("Version selected: " + version)
        return version
    else:
        # BUG FIX: str() is required -- str + int raised TypeError.
        print("Please select a number between zero and " + str(len(release)))
def download():
    """Download the version-list JSON file to the path given by constants.

    Side effects: appends the per-user library directory to sys.path,
    imports the project's ``constants`` and ``log`` modules, fetches
    ``constants.JSON_URL`` to ``constants.JSON_PATH``, and exits the
    process with status 1 if the file did not appear on disk.
    """
    # Locate the current user's private library directory before importing
    # the project-local modules that live there.
    user = pwd.getpwuid(os.getuid())[0]
    sys.path.append("/home/" + user + "/bin/MinecraftLauncher/Python/libs")
    import constants, log

    log.main("Downloading list file")  # BUG FIX: was misspelt "Dowloading"
    ur.urlretrieve(constants.JSON_URL, constants.JSON_PATH)
    if os.path.isfile(constants.JSON_PATH):
        log.main("Done!")
        log.main("List Path: " + constants.JSON_PATH)
    else:
        log.main("Can't download file.")
        sys.exit(1)
def main():
    """Run the attack/submit loop selected by the config's script_function.

    Loops forever for the "pwn", "web" and "local" modes; for an unknown
    mode it prints an error and calls clean().
    """
    # get config
    con = resolve_config.main()

    def _submit_and_log(key, message):
        # Shared tail of every branch: submit the flag message, log the
        # result under `key`, then throttle between submissions.
        message = submit.main(con.submit, message)
        log.main(key, message)
        time.sleep(con.submit_wait)

    if con.script_function == "pwn":
        while 1:
            for ip in con.ip_list:
                _submit_and_log(ip, pwn_attack.main(ip))
            time.sleep(con.round_time)  # wait for the next game round
    elif con.script_function == "web":
        while 1:
            for ip in con.ip_list:
                _submit_and_log(ip, web_attack.main(ip))
            time.sleep(con.round_time)
    elif con.script_function == "local":
        while 1:
            for flag in con.flag_list:
                # Flags are already known locally; fabricate a success record.
                _submit_and_log(flag,
                                {'getflag_status': 'getflag success',
                                 'flag': flag})
            time.sleep(con.round_time)
    else:
        # print(...) with a single argument behaves identically under
        # Python 2 and 3, unlike the original print statement.
        print("script_function set error")
        clean()
def main():
    """Harvest flags (pwn/web/local mode per config) and submit each round.

    Loops forever for the known modes; for an unknown script_function it
    prints an error and calls clean().
    """
    # get config
    (start_ip, end_ip, skip_ip, port, submit_addr, script_function,
     url_file, flag_file, round_time, token, success_request,
     failed_request, flag_start, flag_end) = resolve_config.main()
    ip_list = target.main(start_ip, end_ip, skip_ip)

    def _submit_and_log(key, message):
        # Shared tail of every branch: submit, record the submit status on
        # the message, then log it under `key`.
        submit_status = submit.main(submit_addr, message, token,
                                    success_request, failed_request)
        message['submit_status'] = submit_status['status']
        log.main(key, message)

    if script_function == "pwn":
        while 1:
            for ip in ip_list:
                _submit_and_log(ip, pwn_flag.main(ip, port))
            # NOTE(review): round_time is assumed to throttle once per full
            # round (after the target loop) -- confirm against the original
            # file's indentation.
            time.sleep(round_time)
    elif script_function == "web":
        while 1:
            for url in url_file:
                _submit_and_log(url, web_flag.main(url, flag_start, flag_end))
            time.sleep(round_time)
    elif script_function == "local":
        while 1:
            for flag in flag_file:
                # Flags are already known locally; fabricate a success record.
                _submit_and_log(flag,
                                {'getflag_status': 'getflag success',
                                 'flag': flag})
            time.sleep(round_time)
    else:
        # print(...) with one argument works the same on Python 2 and 3.
        print("script_function set error")
        clean()
def pb():
    """Prompt the user to choose a Beta version.

    Prints every known version with its index, reads an index from stdin,
    and returns the chosen version string.  Returns None (after printing a
    hint) when the index is out of range.
    """
    beta = [
        # BUG FIX: was "b.1.8.1" (stray dot, inconsistent with every other
        # entry).
        "b1.8.1", "b1.8", "b1.7.3",
        # BUG FIX: the original had no comma between "b1.7.2" and "b1.7",
        # so implicit string concatenation produced one bogus entry
        # "b1.7.2b1.7" and shifted every later index.
        "b1.7.2", "b1.7",
        "b1.6.6", "b1.6.5", "b1.6.4", "b1.6.3", "b1.6.2", "b1.6.1", "b1.6",
        # BUG FIX: "b1.5_01" was listed twice in the original.
        "b1.5_01", "b1.5",
        "b1.4_01", "b1.4", "b1.3_01", "b1.3", "b1.2_01", "b1.2",
        "b1.1_02", "b1.1_01", "b1.1.0.2", "b1.0_01", "b1.0"
    ]
    # BUG FIX: the original printed str(beta) (the whole list) instead of
    # the row index; the dead int-vs-range break check has been removed.
    for counter, name in enumerate(beta):
        print("Beta " + str(counter) + '\t\t' + name + '\t\t')
    a = int(input("Enter Version Number: "))
    if 0 <= a < len(beta):
        version = beta[a]
        log.main("Version selected: " + version)
        return version
    else:
        # BUG FIX: str() is required -- str + int raised TypeError.
        print("Please select a number between zero and " + str(len(beta)))
def preeditmovie(expt_raw_data_dir, expt_analyses_dir, positions, params): """ Automated steps to perform prior to editing. """ expt = os.path.basename(expt_analyses_dir) g = params['general'] # First load or create log files for each position log.main(expt_raw_data_dir, expt_analyses_dir, positions, g['write_mode']) # Execute each position in succession for p in positions: # Update the terminal display read.updatelog(expt, p, 'preedit') print 'start position ' + p + ': ' + time.asctime() posn_raw_data_dir = os.path.join(expt_raw_data_dir, p) posn_analyses_dir = os.path.join(expt_analyses_dir, p) # Segmented files will be saved to a temporary directory temp_dir = os.path.join(posn_analyses_dir, 'temp') if g['write_mode'] == 0: read.rmkdir(temp_dir) else: read.cmkdir(temp_dir) # Pad with default parameters, and find frames to process frame_start, frame_stop = float('inf'), 0. for mode in MODES: print '---mode', mode d = params[mode] # Pad with default parameters as necessary d = eval('%s.workflow.fillparams(d)' % mode) # Find all .tif images of specified type in the given directory d['segment']['file_list'] = [] for f in read.listfiles(posn_raw_data_dir, d['segment']['pattern']): j = read.getframenum(f, d['segment']['pattern']) if g['frame_range'][0] <= j < g['frame_range'][1]: frame_start = min(frame_start, j) frame_stop = max(frame_stop, j) d['segment']['file_list'].append(f) frame_stop += 1 # Create arguments for parallel processing args = [(posn_raw_data_dir, temp_dir, MODES, copy.deepcopy(params)) for _ in range(g['num_procs'])] file_list = sorted(args[0][3]['phase']['segment']['file_list']) # # debug: select only a few files -BK # print 'initial frame stop', frame_stop # frame_stop = 500 # file_list = file_list[:frame_stop] # # debug: select only a few files -BK inds = partition_indices(file_list, g['num_procs']) for (sta_ind, end_ind), arg in zip(inds, args): arg[3]['phase']['segment']['file_list'] = file_list[sta_ind:end_ind] # Process each block of frames 
in parallel parallel.main(preeditblock, args, g['num_procs']) print 'extract: ' + time.asctime() # Archive the output files into .zip files, then delete each .tif num_tifs = frame_stop - frame_start num_digits = int(np.ceil(np.log10(num_tifs + 1))) # Create new set of directories with pre-specified block size frames = range(frame_start, frame_stop-1, g['block_size']) frames.append(frame_stop) block_frames = zip(frames[:-1], frames[1:]) # Make directories to hold files, named according to frames read.cmkdir(os.path.join(posn_analyses_dir, 'blocks')) block_dirs = [] for j1, j2 in block_frames: strs = [str(v).zfill(num_digits) for v in (j1, j2)] v = os.path.join(posn_analyses_dir, 'blocks', 'frame{}-{}'.format(*strs)) os.mkdir(v) block_dirs.append(v) for m in MODES: # The segmented .tif files will be stored in a .zip file zip_name = m.capitalize() + 'Segment' [read.cmkdir(os.path.join(v, zip_name)) for v in block_dirs] # Find all segmented .tif images and transfer to the new directories d = params[m] for f in read.listfiles(temp_dir, d['segment']['pattern']): j = read.getframenum(f, d['segment']['pattern']) for i, (j1, j2) in enumerate(block_frames): if j1 <= j < j2: old_name = os.path.join(temp_dir, f) zip_dir = os.path.join(block_dirs[i], zip_name) shutil.move(old_name, zip_dir) # Zip each directory of segmented .tif files old_dir = os.path.abspath(os.curdir) for v in block_dirs: os.chdir(v) archive_util.make_zipfile(zip_name, zip_name) shutil.rmtree(zip_name) os.chdir(old_dir) # Make temporary directories for data outputs dat_name = m.capitalize() + 'Data' [read.cmkdir(os.path.join(v, dat_name)) for v in block_dirs] # Find all analyzed .pickle files and transfer to the new directories f, e = os.path.splitext(d['segment']['pattern']) dat_pattern = (f + '.pickle' + e[4:]) for f in read.listfiles(temp_dir, dat_pattern): j = read.getframenum(f, dat_pattern) for i, (j1, j2) in enumerate(block_frames): if j1 <= j < j2: # Transfer each frame to the correct block old_name 
= os.path.join(temp_dir, f) dat_dir = os.path.join(block_dirs[i], dat_name) shutil.move(old_name, dat_dir) # Concatenate each set of files into a DataFrame for each parameter for block_dir in block_dirs: dat_dir = os.path.join(block_dir, dat_name) data = [] for u in os.listdir(dat_dir): dat_file = os.path.join(dat_dir, u) try: d = read_pickle(dat_file) except: pass data.append(d) df = concat(data) df = df.reindex(sorted(df.index)) for c in df.columns: df[c].to_pickle(os.path.join(block_dir, c + '.pickle')) shutil.rmtree(dat_dir) print 'shuffle: ' + time.asctime() # Delete all temporary files shutil.rmtree(temp_dir) ''' block_dirs = [os.path.join(posn_analyses_dir, 'blocks', v) for v in os.listdir(os.path.join(posn_analyses_dir, 'blocks')) if 'frame' in v] ''' # Track the blocks in parallel args = [] for v in block_dirs: output_file = os.path.join(v, 'Trace.pickle') if os.path.isfile(output_file): os.remove(output_file) args.append((v, output_file, params['phase']['track'])) parallel.main(trackblock, args, g['num_procs']) print 'track: ' + time.asctime() # Stitch independently-tracked trajectories together stitchblocks(block_dirs, params['phase']['track']) print 'stitch: ' + time.asctime() # Collate the data for manual editing output_file = os.path.join(posn_analyses_dir, 'edits.pickle') collateblocks(block_dirs, output_file, params['phase']['collate']) print 'collate: ' + time.asctime() # Update the experiment log file read.updatelog(expt, p, 'preedit', expt_analyses_dir) print 'final: ' + time.asctime()
def preeditmovie(expt_raw_data_dir, expt_analyses_dir, positions, params): """ Automated steps to perform prior to editing. """ expt = os.path.basename(expt_analyses_dir) g = params['general'] # First load or create log files for each position log.main(expt_raw_data_dir, expt_analyses_dir, positions, g['write_mode']) # Execute each position in succession for p in positions: # Update the terminal display read.updatelog(expt, p, 'preedit') print 'start position ' + p + ': ' + time.asctime() posn_raw_data_dir = os.path.join(expt_raw_data_dir, p) posn_analyses_dir = os.path.join(expt_analyses_dir, p) # Segmented files will be saved to a temporary directory temp_dir = os.path.join(posn_analyses_dir, 'temp') if g['write_mode'] == 0: read.rmkdir(temp_dir) else: read.cmkdir(temp_dir) # Pad with default parameters, and find frames to process frame_start, frame_stop = float('inf'), 0. for mode in MODES: print '---mode', mode d = params[mode] # Pad with default parameters as necessary d = eval('%s.workflow.fillparams(d)' % mode) # Find all .tif images of specified type in the given directory d['segment']['file_list'] = [] for f in read.listfiles(posn_raw_data_dir, d['segment']['pattern']): j = read.getframenum(f, d['segment']['pattern']) if g['frame_range'][0] <= j < g['frame_range'][1]: frame_start = min(frame_start, j) frame_stop = max(frame_stop, j) d['segment']['file_list'].append(f) frame_stop += 1 # Create arguments for parallel processing args = [(posn_raw_data_dir, temp_dir, MODES, copy.deepcopy(params)) for _ in range(g['num_procs'])] file_list = sorted(args[0][3]['phase']['segment']['file_list']) # # debug: select only a few files -BK # print 'initial frame stop', frame_stop # frame_stop = 500 # file_list = file_list[:frame_stop] # # debug: select only a few files -BK inds = partition_indices(file_list, g['num_procs']) for (sta_ind, end_ind), arg in zip(inds, args): arg[3]['phase']['segment']['file_list'] = file_list[ sta_ind:end_ind] # Process each block of frames 
in parallel parallel.main(preeditblock, args, g['num_procs']) print 'extract: ' + time.asctime() # Archive the output files into .zip files, then delete each .tif num_tifs = frame_stop - frame_start num_digits = int(np.ceil(np.log10(num_tifs + 1))) # Create new set of directories with pre-specified block size frames = range(frame_start, frame_stop - 1, g['block_size']) frames.append(frame_stop) block_frames = zip(frames[:-1], frames[1:]) # Make directories to hold files, named according to frames read.cmkdir(os.path.join(posn_analyses_dir, 'blocks')) block_dirs = [] for j1, j2 in block_frames: strs = [str(v).zfill(num_digits) for v in (j1, j2)] v = os.path.join(posn_analyses_dir, 'blocks', 'frame{}-{}'.format(*strs)) os.mkdir(v) block_dirs.append(v) for m in MODES: # The segmented .tif files will be stored in a .zip file zip_name = m.capitalize() + 'Segment' [read.cmkdir(os.path.join(v, zip_name)) for v in block_dirs] # Find all segmented .tif images and transfer to the new directories d = params[m] for f in read.listfiles(temp_dir, d['segment']['pattern']): j = read.getframenum(f, d['segment']['pattern']) for i, (j1, j2) in enumerate(block_frames): if j1 <= j < j2: old_name = os.path.join(temp_dir, f) zip_dir = os.path.join(block_dirs[i], zip_name) shutil.move(old_name, zip_dir) # Zip each directory of segmented .tif files old_dir = os.path.abspath(os.curdir) for v in block_dirs: os.chdir(v) archive_util.make_zipfile(zip_name, zip_name) shutil.rmtree(zip_name) os.chdir(old_dir) # Make temporary directories for data outputs dat_name = m.capitalize() + 'Data' [read.cmkdir(os.path.join(v, dat_name)) for v in block_dirs] # Find all analyzed .pickle files and transfer to the new directories f, e = os.path.splitext(d['segment']['pattern']) dat_pattern = (f + '.pickle' + e[4:]) for f in read.listfiles(temp_dir, dat_pattern): j = read.getframenum(f, dat_pattern) for i, (j1, j2) in enumerate(block_frames): if j1 <= j < j2: # Transfer each frame to the correct block 
old_name = os.path.join(temp_dir, f) dat_dir = os.path.join(block_dirs[i], dat_name) shutil.move(old_name, dat_dir) # Concatenate each set of files into a DataFrame for each parameter for block_dir in block_dirs: dat_dir = os.path.join(block_dir, dat_name) data = [] for u in os.listdir(dat_dir): dat_file = os.path.join(dat_dir, u) try: d = read_pickle(dat_file) except: pass data.append(d) df = concat(data) df = df.reindex(sorted(df.index)) for c in df.columns: df[c].to_pickle(os.path.join(block_dir, c + '.pickle')) shutil.rmtree(dat_dir) print 'shuffle: ' + time.asctime() # Delete all temporary files shutil.rmtree(temp_dir) ''' block_dirs = [os.path.join(posn_analyses_dir, 'blocks', v) for v in os.listdir(os.path.join(posn_analyses_dir, 'blocks')) if 'frame' in v] ''' # Track the blocks in parallel args = [] for v in block_dirs: output_file = os.path.join(v, 'Trace.pickle') if os.path.isfile(output_file): os.remove(output_file) args.append((v, output_file, params['phase']['track'])) parallel.main(trackblock, args, g['num_procs']) print 'track: ' + time.asctime() # Stitch independently-tracked trajectories together stitchblocks(block_dirs, params['phase']['track']) print 'stitch: ' + time.asctime() # Collate the data for manual editing output_file = os.path.join(posn_analyses_dir, 'edits.pickle') collateblocks(block_dirs, output_file, params['phase']['collate']) print 'collate: ' + time.asctime() # Update the experiment log file read.updatelog(expt, p, 'preedit', expt_analyses_dir) print 'final: ' + time.asctime()
#!/usr/bin/env python2
"""Download the launcher's version-list JSON file to its configured path."""
import urllib as ur
import pwd, os, sys

# Locate the current user's private library directory before importing the
# project-local modules that live there.
user = pwd.getpwuid(os.getuid())[0]
sys.path.append("/home/" + user + "/bin/MinecraftLauncher/Python/libs")
import constants, log

log.main("Downloading list file")  # BUG FIX: was misspelt "Dowloading"
ur.urlretrieve(constants.JSON_URL, constants.JSON_PATH)
if os.path.isfile(constants.JSON_PATH):
    log.main("Done!")
    log.main("List Path: " + constants.JSON_PATH)
else:
    # Fetch did not produce a file on disk: report and signal failure.
    log.main("Can't download file.")
    sys.exit(1)
#end
def psn():
    """Prompt the user to choose a Snapshot version.

    Prints every known snapshot with its index, reads an index from stdin,
    and returns the chosen version string.  Returns None (after printing a
    hint) when the index is out of range.
    """
    snapshot = [
        "1.14 Pre-Release 5", "1.14 Pre-Release 4", "1.14 Pre-Release 3",
        "1.14 Pre-Release 2", "1.14 Pre-Release 1", "19w14b", "19w14a",
        "3D Shareware v1.34", "19w13b", "19w13a", "19w12b", "19w12a",
        "19w11b", "19w11a", "19w09a", "19w08b", "19w08a", "19w07a",
        "19w06a", "19w05a", "19w04b", "19w04a", "19w03c", "19w03b",
        "19w03a", "19w02a", "18w50a", "18w49a", "18w48b", "18w48a",
        "18w47b", "18w47a", "18w46a", "18w45a", "18w44a", "18w43c",
        "18w43b", "18w43a", "1.13.2-pre2", "1.13.2-pre1", "1.13.1-pre2",
        "1.13.1-pre1", "18w33a", "18w32a", "18w31a", "18w30b", "18w30a",
        "1.13-pre10", "1.13-pre9", "1.13-pre8", "1.13-pre7", "1.13-pre6",
        "1.13-pre5", "1.13-pre4", "1.13-pre3", "1.13-pre2", "1.13-pre1",
        # BUG FIX: the original repeated the pair "18w22b", "18w22a" twice.
        "18w22c", "18w22b", "18w22a", "18w21b", "18w21a", "18w20c",
        "18w20b", "18w20a", "18w19b", "18w19a", "18w16a", "18w15a",
        "18w14b", "18w14a", "18w11a", "18w10d", "18w10c", "18w10b",
        "18w10a", "18w09a", "18w08b", "18w08a", "18w07b", "18w07a",
        "18w06a", "18w05a", "18w03a", "18w02a", "18w01a", "17w50a",
        "17w49b", "17w49a", "17w48a", "17w47b", "17w47a", "17w46a",
        "17w45b", "17w45a", "17w43b", "17w43a", "1.12.2-pre2",
        "1.12.2-pre1", "1.12.1-pre1", "17w31a", "1.12-pre7", "1.12-pre6",
        "1.12-pre5", "1.12-pre4", "1.12-pre3", "1.12-pre2", "1.12-pre1",
        "17w18b", "17w18a", "17w17b", "17w17a", "17w16b", "17w16a",
        "17w15a", "17w14b", "17w14a", "17w13b", "17w13a", "17w06a",
        "16w50a", "1.11-pre1", "16w44a", "16w43a", "16w42a", "16w41a",
        "16w40a", "16w39c", "16w39b", "16w39a", "16w38a", "16w36a",
        "16w35a", "16w33a", "16w32b", "16w32a", "1.10-pre2", "1.10-pre1",
        "16w21b", "16w21a", "16w20a", "1.9.3-pre3", "1.9.3-pre2",
        "1.9.3-pre1", "16w15b", "16w15a", "16w14a", "1.RV-pre-1",
        # BUG FIX: the third entry here was a second "1.9.1-pre3" in the
        # original; the published snapshot sequence is pre3, pre2, pre1.
        "1.9.1-pre3", "1.9.1-pre2", "1.9.1-pre1",
        "1.9-pre4", "1.9-pre3", "1.9-pre2", "1.9-pre1", "16w07b",
        "16w07a", "16w06a", "16w05b", "16w05a", "16w04a", "16w03a",
        "16w02a", "15w51b", "15w51a", "15w50a",
        # BUG FIX: was "15w19b", out of sequence between 15w50a and 15w49a;
        # the published snapshot is 15w49b.
        "15w49b", "15w49a",
        "15w47c", "15w47b", "15w47a", "15w46a", "15w45a", "15w44b",
        "15w44a", "15w43b", "15w43a", "15w42a", "15w41b", "15w41a",
        "15w40b", "15w40a", "15w39c", "15w39b", "15w39a", "15w38b",
        "15w38a", "15w37a", "15w36d", "15w36c", "15w36b", "15w36a",
        "15w35e", "15w35d", "15w35c", "15w35b", "15w35a", "15w34d",
        "15w34c", "15w34b", "15w34a", "15w33c", "15w33b", "15w33a",
        "15w32c", "15w32b", "15w32a", "15w31c", "15w31b", "15w31a",
        "1.8.2-pre7", "1.8.2-pre6", "1.8.2-pre5", "1.8.2-pre4",
        "1.8.2-pre3", "1.8.2-pre2", "1.8.2-pre1", "1.8.1-pre5",
        "1.8.1-pre4", "1.8.1-pre3", "1.8.1-pre2", "1.8.1-pre1",
        "1.8-pre3", "1.8-pre2", "1.8-pre1", "14w34d", "14w34c", "14w34b",
        "14w34a", "14w33c", "14w33b", "14w33a", "14w32d", "14w32c",
        "14w32b", "14w32a", "14w31a", "14w30c", "14w30b", "14w30a",
        "14w29b", "14w29a", "14w28b", "14w28a", "14w27b", "14w27a",
        "14w26c", "14w26b", "14w26a", "14w25b", "14w25a", "14w21b",
        "14w21a", "14w20b", "14w20a", "1.7.10-pre4", "1.7.10-pre3",
        "1.7.10-pre2", "1.7.10-pre1", "14w19a", "14w18b", "14w18a",
        "14w11b", "14w11a", "1.7.6-pre2", "1.7.6-pre1", "14w10c",
        "14w10b", "14w10a", "14w08a", "14w07a", "14w06b", "14w06a",
        "14w05a", "14w04b", "14w04a", "14w03b", "14w03a", "14w02c",
        "14w02b", "14w02a", "13w49a", "13w48b", "13w48a", "13w47e",
        "13w47d", "13w47c", "13w47b", "13w47a", "1.7.1", "1.7", "13w43a",
        "13w42b", "13w42a", "13w41b", "13w41a", "13w39b", "13w39a",
        "13w38c", "13w38b", "13w38a", "1.6.3", "13w37a", "13w36b",
        "13w36a", "1.6", "13w26a", "13w25c", "13w25b", "13w25a", "13w24b",
        "13w24a", "13w23b", "13w23a", "13w22a", "13w21b", "13w21a",
        "13w19a", "13w18c", "13w18b", "13w18a", "13w17a", "13w16b",
        "13w16a", "1.5", "1.4.3", "1.4", "1.3"
    ]
    # The dead "if counter == range(len(snapshot)): break" check (int vs
    # range object, always False) has been removed.
    for counter, name in enumerate(snapshot):
        print("Snapshot " + str(counter) + '\t\t' + name + '\t\t')
    a = int(input("Enter Version Number: "))
    if 0 <= a < len(snapshot):
        version = snapshot[a]
        log.main("Version selected: " + version)
        return version
    else:
        # BUG FIX: str() is required -- str + int raised TypeError.
        print("Please select a number between zero and " + str(len(snapshot)))
import pwd, os, sys
import urllib as ur

# Locate the current user's private library directory before importing the
# project-local log module that lives there.
user = pwd.getpwuid(os.getuid())[0]
sys.path.append("/home/" + user + "/bin/MinecraftLauncher/Python/libs")
import log

log.main("CLI Version Selector Started")


def pr():
    """Prompt the user to choose a Release version.

    Prints every known version with its index, reads an index from stdin,
    and returns the chosen version string.  Returns None (after printing a
    hint) when the index is out of range.
    """
    release = [
        '1.14.1', '1.14', '1.13.2', '1.13.1', '1.13', '1.12.2', '1.12.1',
        # BUG FIX: was '1. 12' (stray space) in the original data.
        '1.12',
        '1.11.2', '1.11.1', '1.11', '1.10.2', '1.10.1', '1.10', '1.9.4',
        '1.9.3', '1.9.2', '1.9.1', '1.9', '1.8.9', '1.8.8', '1.8.7',
        '1.8.6', '1.8.5', '1.8.4', '1.8.3', '1.8.2', '1.8.1', '1.8',
        '1.7.10', '1.7.9', '1.7.8', '1.7.7', '1.7.6', '1.7.5', '1.7.4',
        '1.7.3', '1.7.2', '1.6.4', '1.6.2', '1.6.1', '1.5.2', '1.5.1',
        '1.4.7', '1.4.6', '1.4.5', '1.4.4', '1.4.2', '1.3.2', '1.3.1',
        '1.2.5', '1.2.4', '1.2.3', '1.2.2', '1.2.1', '1.1', '1.0'
    ]
    # The dead "if counter == range(len(release)): break" check (int vs
    # range object, always False) has been removed.
    for counter, name in enumerate(release):
        print("Release " + str(counter) + '\t\t' + name + '\t\t')
    a = int(input("Enter Version Number: "))
    if 0 <= a < len(release):
        version = release[a]
        # BUG FIX: the original file ended abruptly after the assignment;
        # the log call, return and out-of-range branch are restored for
        # consistency with the other copy of pr() in this project.
        log.main("Version selected: " + version)
        return version
    else:
        print("Please select a number between zero and " + str(len(release)))
def create_logs():
    """Initialise the project's log files by delegating to ``log.main()``."""
    import log as _log

    _log.main()
THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT, COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE. THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF, IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE. ******************************************************************************** '''
# swlog entry point: parse the command line and hand off to log.main().
import parse
import log
# NOTE(review): the wildcard imports below are assumed to supply
# enforce_requirements() and any globals log.main() relies on -- confirm
# against swadm/config before refactoring.
from swadm import *
from config import *

enforce_requirements()

# Parse options using a modified optparse. The modifications are implemented in
# parse.py
usage = "swlog [-d]\nExample: swlog [-d]"
parser = parse.OptionParser(usage=usage)
parser.add_option("-d", "--dialog", action="store_true", dest="dialog",
                  help="use dialog (not editor) format")
(opts, args) = parser.parse_args()
# opts.dialog is True when -d was given, None otherwise; log.main decides
# between dialog and editor output based on it.
log.main(opts.dialog)
"""Plot sign-in durations over time for each user recorded in database.db."""
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import interface
import log
from pprint import pprint

log.main()

c = interface.database.open_connection('database.db')
signIn = c.create_table('signIn', 'id, timeIn, timeOut, seconds',
                        'REAL, REAL, REAL, REAL')

# Plot one line per distinct id.  NOTE(review): the nesting below was
# reconstructed from a whitespace-mangled original -- the per-id query and
# plt.plot are assumed to run once per newly-seen id; confirm intent.
seen_ids = []  # renamed from `ids`/`id` to stop shadowing the builtin id()
for row_id in signIn.select_entries(to_select='id'):
    if row_id not in seen_ids:
        seen_ids.append(row_id)
        a, b = [], []
        for time_in, time_out, seconds in signIn.select_entries(
                to_select='timeIn, timeOut, seconds',
                conditions='id={}'.format(row_id[0])):
            # x = midpoint of the session, y = its duration in seconds.
            a.append((time_in + time_out) / 2)
            b.append(seconds)
        plt.plot(a, b, label=log.names[row_id[0]])

plt.legend()
plt.show()
def sendMessageDefault(chatid, text, replyid):
    """POST a Markdown-formatted reply to a chat and return the decoded
    JSON response from the bot API."""
    response = requests.post(
        url=baseurl + "/sendMessage",
        data={'chat_id': chatid,
              'text': text,
              'reply_to_message_id': replyid,
              'parse_mode': "Markdown"}).json()
    return response


def checkTrigger(msg):
    """Run the first plugin whose regex matches msg['text'].

    A plugin returning a str is sent as a plain reply; a dict is passed to
    sendMessage() unchanged.  Stops after the first match.
    """
    for x in plugins:
        for regex in plugins[x].regex:
            match = re.search(regex, msg['text'])
            # BUG FIX (idiom): was `match != None`; identity comparison is
            # the correct test against the None singleton.
            if match is not None:
                rval = plugins[x].main(msg)
                # BUG FIX: removed leftover debug output `print(type(rval))`.
                if isinstance(rval, str):
                    sendMessageDefault(msg['chat']['id'], rval,
                                       msg['message_id'])
                elif isinstance(rval, dict):
                    sendMessage(rval)
                return


# Long-poll the bot API: log every message, dispatch text messages to the
# plugins, and advance update_id past the last processed update.
while run:
    update = requests.get(baseurl + "/getUpdates?offset=%s" % update_id)
    getupdate = json.loads(update.text)
    results = getupdate['result']
    for i, result in enumerate(results):
        msg = result['message']
        log.main(msg, text_file)
        if 'text' in msg:
            checkTrigger(msg)
        if i == len(results) - 1:
            # Acknowledge everything up to and including this update.
            update_id = result['update_id'] + 1