def main():
    # Parse command line arguments
    args = Args()
    size = args.size()

    partitions = args.partitions()
    if isinstance(partitions, collections.Iterable):
        pd = partitions
    else:
        # Try to find a good factorization of the partition count
        pd = [0] * 3
        pd[0] = int(math.ceil(math.pow(partitions, 1. / 3.)))
        while partitions % pd[0] != 0:
            pd[0] -= 1
        p = partitions / pd[0]
        pd[1] = int(math.ceil(math.sqrt(p)))
        while p % pd[1] != 0:
            pd[1] -= 1
        pd[2] = p / pd[1]

        pd.sort()
        # Get sorted indices of the sizes and sort partitions in the same order
        sortedSizes = [i[0] for i in sorted(enumerate(size), key=operator.itemgetter(1))]
        pd = [pd[sortedSizes[i]] for i in range(3)]

    maxp = pd[0] * pd[1] * pd[2]
    print 'Number of partitions in each direction: ' + ', '.join(map(str, pd)) + '; total partitions: ' + str(maxp)

    # Write partition; each direction uses ceil(size/pd) elements per partition
    file = args.output()
    for z in range(size[2]):
        pz = z / ((size[2] + pd[2] - 1) / pd[2])
        for y in range(size[1]):
            py = y / ((size[1] + pd[1] - 1) / pd[1])
            for x in range(size[0]):
                px = x / ((size[0] + pd[0] - 1) / pd[0])
                p = px + (py + pz * pd[1]) * pd[0]
                if p < 0 or p >= maxp:
                    raise IOError('Wrong partition number computed: ' + str(p))
                for i in range(5):
                    print >> file, p
    file.close()
def main():
    # Parse command line arguments
    args = Args()

    # Create a temporary working directory
    tmpdir = TmpDir('gambit2seissol', args.tmpDir())

    # Read the Gambit file
    try:
        mesh = GambitReader(args.inputFile(), args.noReorder())
    except IOError, e:
        print >> sys.stderr, 'Could not parse GAMBIT file'
        print >> sys.stderr, str(e)
        sys.exit(1)
def main():
    parser = Args.make_parser(
        'Starts the named fuzzer. Additional arguments are passed through.')
    args, fuzzer_args = parser.parse_known_args()
    host = Host()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    with Cipd.from_args(fuzzer, args) as cipd:
        if cipd.install():
            device.store(os.path.join(cipd.root, '*'), fuzzer.data_path('corpus'))
    print('\n****************************************************************')
    print(' Starting ' + str(fuzzer) + '.')
    print(' Outputs will be written to:')
    print(' ' + fuzzer.results())
    if not args.foreground:
        print(' You should be notified when the fuzzer stops.')
        print(' To check its progress, use `fx fuzz check ' + str(fuzzer) + '`.')
        print(' To stop it manually, use `fx fuzz stop ' + str(fuzzer) + '`.')
    print('****************************************************************\n')
    fuzzer.start(fuzzer_args)
    title = str(fuzzer) + ' has stopped.'
    body = 'Output written to ' + fuzzer.results() + '.'
    print(title)
    print(body)
    host.notify_user(title, body)
    return 0
def test_from_args(self):
    host = MockHost()
    parser = Args.make_parser('description', name_required=False)
    # netaddr should get called with 'just-four-random-words', and fail
    with self.assertRaises(RuntimeError):
        args = parser.parse_args(['--device', 'just-four-random-words'])
        device = Device.from_args(host, args)
def test_from_args(self):
    mock_device = MockDevice()
    parser = Args.make_parser('description')

    with self.assertRaises(Fuzzer.NameError):
        args = parser.parse_args(['target'])
        fuzzer = Fuzzer.from_args(mock_device, args)

    with self.assertRaises(Fuzzer.NameError):
        args = parser.parse_args(['target4'])
        fuzzer = Fuzzer.from_args(mock_device, args)
def main():
    parser = Args.make_parser(
        description='Lists corpus instances in CIPD for the named fuzzer')
    args = parser.parse_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    with Corpus.from_args(fuzzer, args) as corpus:
        cipd = Cipd(corpus)
        print(cipd.instances())
    return 0
def main():
    parser = Args.make_parser(
        'Lists the fuzzing corpus instances in CIPD for a named fuzzer')
    args = parser.parse_args()
    host = Host()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    cipd = Cipd(fuzzer)
    if not cipd.list():
        return 1
    return 0
def get_filters(parser):
    filters = " "
    if parser.action != 'crop':
        return filters

    crop_args = {}
    crop_args['width'] = str(parser.width / 100)
    crop_args['height'] = str(parser.height / 100)
    crop_args['x_point'] = str(parser.x_point / 100)
    crop_args['y_point'] = str(parser.y_point / 100)
    Args.validate_crop_args(crop_args)

    # vid = cv2.VideoCapture(inp)
    # success, frame = vid.read()
    # cv2.imshow('frame', frame)

    # Command to crop the lower-right quarter:
    # ffmpeg -i in.mp4 -filter:v "crop=in_w/2:in_h/2:in_w/2:in_h/2" -c:a copy out.mp4
    filters = ' -filter:v "crop=in_w*' + crop_args['width'] + ':in_h*' + crop_args['height'] + \
              ':in_w*' + crop_args['x_point'] + ':in_h*' + crop_args['y_point'] + '" '
    return filters
def test_pull(self):
    mock = MockDevice()
    fuzzer = Fuzzer(mock, u'mock-package1', u'mock-target3')
    parser = Args.make_parser('description')
    args = parser.parse_args(['1/3'])
    corpus = Corpus.from_args(fuzzer, args)
    corpus.pull()
    self.assertIn(
        ' '.join(
            mock.get_ssh_cmd([
                'scp', '[::1]:' + fuzzer.data_path('corpus/*'), corpus.root
            ])), mock.host.history)
def main():
    # Parse command line arguments
    args = Args()

    print 'Generating mesh with size', ', '.join(map(str, args.size()))

    # Create Mesh
    mesh = Mesh(args.size(), args.boundary())
    print 'Number of elements:', len(mesh.elements())

    # Write mesh
    if args.netcdf():
        try:
            from lib.netcdf import NetcdfWriter
        except ImportError, e:
            print 'netcdf4-python could not be loaded:', e
            return

        print 'Mesh will contain', ' * '.join(map(str, args.partitions())), '=', reduce(operator.mul, args.partitions(), 1), 'partitions'
        NetcdfWriter(mesh, args.partitions(), args.outputFile())
def main():
    parser = Args.make_parser('Stops the named fuzzer.')
    args = parser.parse_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    if fuzzer.is_running():
        print('Stopping ' + str(fuzzer) + '.')
        fuzzer.stop()
    else:
        print(str(fuzzer) + ' is already stopped.')
    return 0
def test_push(self):
    mock = MockDevice()
    fuzzer = Fuzzer(mock, u'mock-package1', u'mock-target3')
    parser = Args.make_parser('description')
    args = parser.parse_args(['1/3'])
    corpus = Corpus.from_args(fuzzer, args)
    with tempfile.NamedTemporaryFile(dir=corpus.root) as f:
        corpus.push()
        self.assertIn(
            ' '.join(
                mock.get_ssh_cmd([
                    'scp', f.name, '[::1]:' + fuzzer.data_path('corpus')
                ])), mock.host.history)
def main():
    parser = Args.make_parser(
        'Runs the named fuzzer on provided test units, or all current test ' +
        'units for the fuzzer. Use \'check-fuzzer\' to see current test units.')
    args, fuzzer_args = parser.parse_known_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    if fuzzer.repro(fuzzer_args) == 0:
        print('No matching artifacts found.')
        return 1
    return 0
def test_from_args(self):
    fuzzer = Fuzzer(MockDevice(), u'mock-package1', u'mock-target3')
    parser = Args.make_parser('description')
    args = parser.parse_args(['1/3'])
    corpus = Corpus.from_args(fuzzer, args)
    self.assertTrue(os.path.exists(corpus.root))

    tmp_dir = tempfile.mkdtemp()
    try:
        args = parser.parse_args(['1/3', '--staging', tmp_dir])
        corpus = Corpus.from_args(fuzzer, args)
        self.assertEqual(tmp_dir, corpus.root)
    finally:
        shutil.rmtree(tmp_dir)
def main():
    parser = Args.make_parser(
        description='Lists fuzzers matching NAME if provided, or all fuzzers.',
        name_required=False)
    args = parser.parse_args()
    host = Host.from_build()
    fuzzers = Fuzzer.filter(host.fuzzers, args.name)
    if len(fuzzers) == 0:
        print('No matching fuzzers.')
        return 1
    print('Found %d matching fuzzers:' % len(fuzzers))
    for fuzzer in fuzzers:
        print(' %s/%s' % fuzzer)
    return 0
def main():
    parser = Args.make_parser(
        'Transfers a fuzzing corpus for a named fuzzer from a device to CIPD')
    args = parser.parse_args()
    host = Host()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    if fuzzer.measure_corpus()[0] == 0:
        print('Ignoring ' + str(fuzzer) + '; corpus is empty.')
        return 0
    with Cipd.from_args(fuzzer, args) as cipd:
        device.fetch(fuzzer.data_path('corpus/*'), cipd.root)
        if not cipd.create():
            return 1
    return 0
def main():
    parser = Args.make_parser(
        'Transfers corpus for a named fuzzer to a device', label_present=True)
    args = parser.parse_args()
    host = Host()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    if os.path.isdir(args.label):
        device.store(os.path.join(args.label, '*'), fuzzer.data_path('corpus'))
        return 0
    with Cipd.from_args(fuzzer, args, label=args.label) as cipd:
        if not cipd.install():
            return 1
        device.store(os.path.join(cipd.root, '*'), fuzzer.data_path('corpus'))
    return 0
def main():
    parser = Args.make_parser(
        'Transfers a fuzzing corpus for a named fuzzer from a device to CIPD')
    args = parser.parse_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    if fuzzer.measure_corpus()[0] == 0:
        print('Ignoring ' + str(fuzzer) + '; corpus is empty.')
        return 0
    with Corpus.from_args(fuzzer, args) as corpus:
        corpus.pull()
        cipd = Cipd(corpus)
        if not args.no_cipd:
            cipd.create()
    return 0
def main():
    parser = Args.make_parser(
        'Starts the named fuzzer. Additional arguments are passed through.')
    args, fuzzer_args = parser.parse_known_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)

    if not args.monitor:
        with Corpus.from_args(fuzzer, args) as corpus:
            cipd = Cipd(corpus)
            if not args.no_cipd:
                cipd.install('latest')
            corpus.push()
        print('\n****************************************************************')
        print(' Starting ' + str(fuzzer) + '.')
        print(' Outputs will be written to:')
        print(' ' + fuzzer.results())
        if not args.foreground:
            print(' You should be notified when the fuzzer stops.')
            print(' To check its progress, use `fx fuzz check ' + str(fuzzer) + '`.')
            print(' To stop it manually, use `fx fuzz stop ' + str(fuzzer) + '`.')
        print('****************************************************************\n')
        fuzzer.start(fuzzer_args)
        if not args.foreground:
            subprocess.Popen(['python', sys.argv[0], '--monitor', str(fuzzer)])
    else:
        fuzzer.monitor()
        title = str(fuzzer) + ' has stopped.'
        body = 'Output written to ' + fuzzer.results() + '.'
        print(title)
        print(body)
        host.notify_user(title, body)
    return 0
def main():
    args = Args()
    if args.signal:
        c = Control(args.port)
        if args.signal == u"start":
            c.start()
        elif args.signal == u"stop":
            c.stop()
        elif args.signal == u"restart":
            c.restart()
    else:
        pool = Pool(Config.pool_size, init_worker)
        playlist = Playlist(args.port, pool)
        if args.query:
            try:
                print playlist.query()
            except IOError:
                pass  # don't puke if quitting less(1)
        elif args.shuffle:
            print playlist.shuffle()
        elif args.repeat:
            print playlist.repeat()
        elif args.kontinue:
            print playlist.kontinue()
        elif args.next_album:
            print playlist.next_album()
        elif args.next_artist:
            print playlist.next_artist()
        else:
def main():
    parser = Args.make_parser(
        'Minimizes the current corpus for the named fuzzer. This should be ' +
        'used after running the fuzzer for a while, or after incorporating a ' +
        'third-party corpus using \'fetch-corpus\'')
    args, fuzzer_args = parser.parse_known_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    with Cipd.from_args(fuzzer, args) as cipd:
        if cipd.install():
            device.store(os.path.join(cipd.root, '*'), fuzzer.data_path('corpus'))
        if fuzzer.merge(fuzzer_args) == (0, 0):
            print('Corpus for ' + str(fuzzer) + ' is empty.')
            return 1
        device.fetch(fuzzer.data_path('corpus/*'), cipd.root)
        if not cipd.create():
            return 1
    return 0
def main():
    parser = Args.make_parser(
        description='Reports status for the fuzzer matching NAME if provided, ' +
        'or for all running fuzzers. Status includes execution state, corpus ' +
        'size, and number of artifacts.',
        name_required=False)
    args = parser.parse_args()
    host = Host()
    device = Device.from_args(host, args)
    fuzzers = Fuzzer.filter(host.fuzzers, args.name)

    pids = device.getpids()
    silent = True
    for pkg, tgt in fuzzers:
        fuzzer = Fuzzer(device, pkg, tgt)
        if not args.name and tgt not in pids:
            continue
        silent = False
        if tgt in pids:
            print(str(fuzzer) + ': RUNNING')
        else:
            print(str(fuzzer) + ': STOPPED')
        print(' Output path: ' + fuzzer.data_path())
        print(' Corpus size: %d inputs / %d bytes' % fuzzer.measure_corpus())
        artifacts = fuzzer.list_artifacts()
        if len(artifacts) == 0:
            print(' Artifacts: None')
        else:
            print(' Artifacts: ' + artifacts[0])
            for artifact in artifacts[1:]:
                print(' ' + artifact)
    if silent:
        print('No fuzzers are running. Include \'name\' to check specific fuzzers.')
        return 1
    return 0
def main():
    parser = Args.make_parser(
        'Minimizes the current corpus for the named fuzzer. This should be ' +
        'used after running the fuzzer for a while, or after incorporating a ' +
        'third-party corpus using \'fetch-corpus\'')
    args, fuzzer_args = parser.parse_known_args()
    host = Host.from_build()
    device = Device.from_args(host, args)
    fuzzer = Fuzzer.from_args(device, args)
    with Corpus.from_args(fuzzer, args) as corpus:
        cipd = Cipd(corpus)
        if not args.no_cipd:
            cipd.install('latest')
        corpus.push()
        if fuzzer.merge(fuzzer_args) == (0, 0):
            print('Corpus for ' + str(fuzzer) + ' is empty.')
            return 1
        corpus.pull()
        if not args.no_cipd:
            cipd.create()
    return 0
        # model = Ganomaly(args, dataloader)
    elif args.model == 'c2plus1d':
        from lib.train_stcnn import VFD_STCNN
        model = VFD_STCNN(args, dataloader)
    elif args.model == 'xception':
        from lib.train_stcnn import VFD_STCNN
        model = VFD_STCNN(args, dataloader)
    elif args.model == 'clstm':
        from lib.train_stcnn import VFD_STCNN
        model = VFD_STCNN(args, dataloader)
    else:
        print("\n %s is None." % (args.model))
        exit()
    model.train()


if __name__ == '__main__':
    args = Args().parse()
    print(args.gpu)
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    if len(args.gpu) > 1:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.gpu)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu[0])
    main(args)
        self.write_template(self.dropper_template, self.dropper_py_temp, _dict)

    def start(self):
        self.compile_bot()
        if self.exe:
            self.compile_dropper()
        self.clean_up()

    def clean_up(self):
        spec_file = '{}.spec'.format(self.filename)
        remove(self.dropper_py_temp)
        remove(self.bot_py_temp)
        remove(spec_file)


if __name__ == '__main__':
    args = Args()
    if args.set_args():
        executor = Executor(args.ip, args.port, args.name, args.delay,
                            args.wait, args.type, args.icon, args.hide,
                            args.persist)
        executor.start()
        Popen('cls' if is_win else 'clear', shell=True).wait()
        print('\nFinished creating {}'.format(
            executor.filename + '.exe' if executor.exe else executor.bot_py_temp))
        print('Look in the directory named dist for your exe file'
              if executor.exe else
              'Look in the directory named bot for your Python file')
        path = os.path.join(self.dist_path, file)
        shutil.move(path, output_dir)

    def start(self):
        self.compile_bot()
        self.compile_stager()
        self.clean_up()

    def clean_up(self):
        shutil.rmtree(self.tmp_dir)
        os.remove(self.bot_py_temp)
        os.remove(self.stager_py_temp)


if __name__ == '__main__':
    args = Args()
    if args.set_args():
        if not args.icon:
            icons = {
                1: 'icons/wordicon.ico',
                2: 'icons/excelicon.ico',
                3: 'icons/ppticon.ico'
            }
            option = input(
                '\n\n1) MS Word\n2) MS Excel\n3) MS Powerpoint\n\nSelect an icon option: ')
            if not option.isdigit():
                args.icon = icons[1]
            elif int(option) > 3 or int(option) < 1:
def handler(event, context):
    args = Args().args
    ec2_client = conn().boto3('ec2', args.region)
    cloudwatch_client = conn().boto3('cloudwatch', args.region)

    if args.volume and args.instance:
        logger.error("Only one option (volume or instance) is allowed")
        exit(1)
    if args.volume or args.instance:
        logger.info(
            "Defaulting to type 'create-snapshot' with inclusion of arg: %s %s"
            % (args.instance, args.volume))
        args.type = "create-snapshot"

    retention_day = timedelta(days=args.retention)
    start_date = Global.today - retention_day

    logger.info("*** Timing ***")
    logger.info("\tCurrent time: %i" % (Global.current_time))
    logger.info("\tRetention: %i" % (args.retention))
    logger.info("\tFull day in seconds: %i" % (Global.full_day))
    logger.info("\tToday: %s" % (str(Global.today)))
    logger.info("\tTomorrow: %s" % (str(Global.tomorrow)))
    logger.info("\tYesterday: %s" % (str(Global.yesterday)))
    logger.info("\t2 Weeks Ago: %s" % (str(Global.two_weeks)))
    logger.info("\t4 Weeks Ago: %s" % (str(Global.four_weeks)))
    logger.info("\t30 Days Ago: %s" % (str(Global.thirty_days)))
    logger.info("\tRetention Time: %s" % (str(retention_day)))
    logger.info("\tStart Date: %s" % (str(start_date)))
    logger.info("\tShort Date: %s" % (Global.short_date))
    logger.info("\tShort Hour: %s" % (Global.short_hour))
    logger.info("")
    logger.info("*** Defined Args ***")
    logger.info("\targs.verbosity: %s" % (args.verbosity))
    logger.info("\targs.type: %s" % (args.type))
    logger.info("\targs.env: %s" % (args.env))
    logger.info("\targs.volume: %s" % (args.volume))
    logger.info("\targs.instance: %s" % (args.instance))
    logger.info("\targs.retention: %s" % (args.retention))
    logger.info("\targs.dry_run: %s" % (args.dry_run))
    logger.info("\targs.region: %s" % (args.region))
    logger.info("\targs.account_id: %s" % (args.account_id))
    logger.info("\targs.rotation: %s" % (args.rotation))
    logger.info("\targs.hourly: %s" % (args.hourly))
    logger.info("\targs.persist: %s" % (args.persist))
    logger.info("\targs.method: %s" % (args.method))
    logger.info("\targs.include_ami: %s" % (args.include_ami))
    logger.info("")

    Instance(ec2_client, args.dry_run).find(args.env, '')
    Volume(ec2_client, args.dry_run).find(cloudwatch_client, args.instance,
                                          args.volume, args.hourly, args.persist)
    if args.type != "create-snapshot" or args.type != "create-snapshots":
        Snapshot(ec2_client, args.dry_run).find(args.account_id, args.env, args.method)
    if not args.volume and not args.instance:
        if args.type != "clean-snapshot" or args.type != "clean-snapshots" or args.type != "clean-volume" or args.type != "clean-volumes":
            Image(ec2_client, args.dry_run).find(args.env, args.account_id)

    if args.type == "all" or args.type == "clean-snapshot" or args.type == "clean-snapshots" or args.type == "clean":
        snapshot_count = 0
        logger.info("\n\n")
        logger.debug("Ignoring any env flag for cleanup: %s" % (args.env))
        logger.info("")
        logger.info("*** Cleaning Snapshots ***")
        logger.debug("\tsnapshot_data len: %i" % (len(Global.snapshot_data)))
        for snapshot in Global.snapshot_data:
            logger.info("Retrieved snapshot: %s" % (snapshot))
            if Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'] > 0:
                # if Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'] > args.rotation and not Global.snapshot_data[snapshot]['persist'] and not Global.snapshot_data[snapshot]['id'] in Global.image_data:
                logger.debug("")
                logger.debug("snapshot id: %s" % (Global.snapshot_data[snapshot]['id']))
                logger.debug("\tsnap_vol: %s" % (Global.snapshot_data[snapshot]['volume_id']))
                logger.debug("\tsnap_desc: %s" % (Global.snapshot_data[snapshot]['description']))
                logger.debug("\tsnap_date: %s" % (Global.snapshot_data[snapshot]['date']))
                logger.debug("\tsnap_ratio: %s" % (Global.snapshot_data[snapshot]['ratio']))
                logger.debug("\tsnap_age: %s" % (Global.snapshot_data[snapshot]['age']))
                logger.debug("\tsnap_persist: %s" % (Global.snapshot_data[snapshot]['persist']))
                logger.debug("\tsnap_method: %s" % (Global.snapshot_data[snapshot]['method']))
                logger.debug("\tsnap_count: %s" % (Global.snapshot_data[snapshot]['snap_count']))
                logger.debug("\tvolume_snapshot_count: %s" % (
                    Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count']))
                logger.debug("\trotation_scheme: %i" % (args.rotation))
                logger.debug(
                    "\tDeleting %s - [ snap_count:%s, volume_count:%s, persist: %s ] [ vol: %s ]"
                    % (Global.snapshot_data[snapshot]['id'],
                       Global.snapshot_data[snapshot]['snap_count'],
                       Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'],
                       Global.snapshot_data[snapshot]['persist'],
                       Global.snapshot_data[snapshot]['volume_id']))
                if Global.snapshot_data[snapshot]['volume_id'] not in Global.all_volumes:
                    logger.debug(
                        "\tvol: %s snap: %s snap_count: %s rotate: %i"
                        % (Global.snapshot_data[snapshot]['volume_id'],
                           Global.snapshot_data[snapshot]['id'],
                           Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'],
                           args.rotation))
                    ret_val = Snapshot(ec2_client, args.dry_run).delete(
                        Global.snapshot_data[snapshot]['id'], '')
                    snapshot_count = snapshot_count + ret_val
                    Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'] = \
                        Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'] - ret_val
                else:
                    logger.debug(
                        "\tvol: %s snap: %s snap_count: %s rotate: %i"
                        % (Global.snapshot_data[snapshot]['volume_id'],
                           Global.snapshot_data[snapshot]['id'],
                           Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'],
                           args.rotation))
                    ret_val = Snapshot(ec2_client, args.dry_run).delete(
                        Global.snapshot_data[snapshot]['id'], 'delete_snapshot')
                    snapshot_count = snapshot_count + ret_val
                    Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'] = \
                        Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'] - ret_val
            else:
                logger.warn("")
                logger.warn(
                    "\tIgnoring deletion of %s - [ snap_count:%s, volume_count:%s, persist: %s ]"
                    % (Global.snapshot_data[snapshot]['id'],
                       Global.snapshot_data[snapshot]['snap_count'],
                       Global.volume_snapshot_count[Global.snapshot_data[snapshot]['volume_id']]['count'],
                       Global.snapshot_data[snapshot]['persist']))
        logger.info(" *** Total Snapshots Deleted: %s" % (snapshot_count))

    if args.type == "all" or args.type == "clean-volume" or args.type == "clean-volumes" or args.type == "clean":
        volume_count = 0
        logger.info("\n\n")
        logger.debug("Ignoring any env flag for cleanup: %s" % (args.env))
        logger.info("*** Cleaning Volumes ***")
        logger.info("*** Note: this tags items with tag { 'Delete': 'True' } ***\n")
        for volume in Global.volume_data:
            logger.info("Retrieved Volume: %s" % (volume))
            volume_count = volume_count + 1
            logger.debug("")
            logger.debug("volume_id: %s" % (Global.volume_data[volume]['id']))
            logger.debug("\tvolume_instance_id: %s" % (Global.volume_data[volume]['instance_id']))
            logger.debug("\tvolume_date: %s" % (Global.volume_data[volume]['date']))
        logger.info(" *** Total Volumes To Delete: %s" % (volume_count))

    if args.type == "all" or args.type == "clean-ami" or args.type == "clean" \
            or args.type == "clean-images":
        image_count = 0
        logger.info("\n\n")
        logger.debug("Ignoring any env flag for cleanup: %s" % (args.env))
        logger.info("*** Cleaning Images ***")
        # logger.info("Include_ami: %s" % (args.include_ami))
        logger.info("Images found: %i" % (len(Global.image_data)))
        for image in Global.image_data:
            image_count = image_count + 1
            logger.debug("")
            logger.debug("ami_id: %s" % (Global.image_data[image]['id']))
            logger.debug("\tami_name: %s" % (Global.image_data[image]['name']))
            logger.debug("\tami_attachment_id: %s" % (Global.image_data[image]['date']))
            logger.debug("\tami_snapshot_id: %s" % (Global.image_data[image]['snapshot_id']))
            logger.debug("\tami_persist: %s" % (Global.image_data[image]['persist']))
            logger.debug("\tami_build_method: %s" % (Global.image_data[image]['build_method']))
            # This is disabled for now until we're sure we want to auto-delete AMIs
            if args.include_ami:
                if Global.image_data[image]['persist'] != "True":
                    logger.info("Deregistering AMI: %s" % (Global.image_data[image]['name']))
                    for ami_snapshot in Global.image_data[image]['snapshot_id']:
                        logger.info("\t deleting snapshot: %s" % (ami_snapshot))
                        Snapshot(ec2_client, args.dry_run).delete(ami_snapshot, 'delete_snapshot')
                    logger.info("\t deleting image: %s" % (Global.image_data[image]['id']))
                    Image(ec2_client, args.dry_run).delete(
                        Global.image_data[image]['id'],
                        Global.image_data[image]['name'])
        logger.info(" *** Total Images Deregistered: %s" % (image_count))

    if args.type == "all" or args.type == "create-snapshot" or args.type == "create-snapshots":
        snapshot_count = 0
        logger.info("\n\n")
        logger.info("*** Creating Snapshots ***")
        for s_volume in Global.snapshot_volumes:
            logger.debug("")
            logger.debug("\tsnapshot_volume['volume_id']: %s" % (Global.snapshot_volumes[s_volume]['id']))
            logger.debug("\tsnapshot_volume['instance_id']: %s" % (Global.snapshot_volumes[s_volume]['instance_id']))
            logger.debug("\tsnapshot_volume['date']: %s" % (Global.snapshot_volumes[s_volume]['date']))
            logger.debug("\tsnapshot_volume['desc']: %s" % (Global.snapshot_volumes[s_volume]['desc']))
            logger.debug("\tsnapshot_volume['old_desc']: %s" % (Global.snapshot_volumes[s_volume]['old_desc']))
            logger.debug("\tsnapshot_volume['persist']: %s" % (Global.snapshot_volumes[s_volume]['persist']))
            logger.debug("\tsnapshot_volume['hourly']: %s" % (Global.snapshot_volumes[s_volume]['hourly']))
            snapshot_count = snapshot_count + Snapshot(ec2_client, args.dry_run).create(
                args.region,
                Global.snapshot_volumes[s_volume]['id'],
                Global.snapshot_volumes[s_volume]['desc'],
                Global.snapshot_volumes[s_volume]['old_desc'],
                Global.snapshot_volumes[s_volume]['persist'])
        logger.info(" *** Total Volumes to Snapshot: %s" % (snapshot_count))
    return True