import os
from concurrent.futures import ThreadPoolExecutor
from functools import partial

import img2pdf


def main():
    args = parse_args()

    print("Getting metadata of gallery...")
    gallery = get_gallery_metadata(args.gid, args.token)

    image_path = ROOT + "/images/" + gallery["title"]
    if args.export_images and not os.path.exists(image_path):
        os.mkdir(image_path)

    print("Getting", gallery["filecount"], "page links...")
    page_links = get_page_links(args.gid, args.token,
                                int(gallery["filecount"]), args.source,
                                args.member_id, args.pass_hash)

    print("Getting images...")
    images = []
    _get_image = partial(get_image, member_id=args.member_id,
                         pass_hash=args.pass_hash, raw_image=args.raw_images)
    # Fetch pages concurrently; executor.map() preserves page order.
    with ThreadPoolExecutor(max_workers=args.worker) as executor:
        future = executor.map(_get_image, page_links)
        for page, image in enumerate(future, 1):
            images.append(image)
            if args.export_images:
                with open(image_path + "/" + str(page) + ".jpg", "wb") as f:
                    f.write(image)

    with open(ROOT + "/galleries/" + gallery["title"] + ".pdf", "wb") as file:
        file.write(
            img2pdf.convert([remove_transparency(image) for image in images]))
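# The parse_args helper above is not shown; here is a minimal argparse sketch,
# with flag names inferred from the attributes the snippet reads (gid, token,
# source, member_id, pass_hash, raw_images, export_images, worker). Names and
# defaults are assumptions, not the project's actual CLI.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description="Download a gallery to PDF")
    parser.add_argument("gid", help="gallery id")
    parser.add_argument("token", help="gallery token")
    parser.add_argument("--source", default=None)
    parser.add_argument("--member-id", dest="member_id", default=None)
    parser.add_argument("--pass-hash", dest="pass_hash", default=None)
    parser.add_argument("--raw-images", dest="raw_images", action="store_true")
    parser.add_argument("--export-images", dest="export_images",
                        action="store_true")
    parser.add_argument("--worker", type=int, default=4,
                        help="number of download threads")
    return parser.parse_args()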
def main(args):
    problem_ids, episodes, grid = parse_args(args)
    for problem_id in problem_ids:
        agent = ReinforcementLearningAgent(problem_id=problem_id,
                                           map_name_base=grid)
        agent.solve(episodes=episodes)
        agent.evaluate(episodes)

        passive_agent = PassiveAgent(problem_id=problem_id,
                                     map_name_base=grid)
        passive_agent.solve()
        passive_agent.evaluate(episodes)

        compare_utils(passive_agent.U, agent.U)
def main(args): """Main Program.""" problem_ids, episodes, grid = parse_args(args) print('It was found out that setting the seed for random was slow.. you can turn it on with seed=True') print('More info in documentation...') # Reset the random generator to a known state (for reproducability) np.random.seed(12) for problem_id in problem_ids: # this seed doesn't work... if needed, change seed to True below agent = RandomAgent(problem_id=problem_id, map_name_base=grid) agent.solve(episodes=episodes, seed=None) agent.evaluate(episodes)
def listener(self, robot):
    while True:
        # if time.time() - last >= 60:
        #     last = time.time()
        #     self._uart.write("#")
        # elif time.time() - last >= 20:
        #     self._uart.write(",")

        # Parse the serial port input for one command.
        received_cmd = self._uart.readline().split("\n")[0].split(
            "\x00")[-1]
        # Split it into fn_key and then the args.
        cmd_list = received_cmd.split()
        if len(cmd_list) > 0:
            fn_key = cmd_list[0]
            args = hp.parse_args(cmd_list[1:])
            # print('fn_key: ', fn_key)
            # print('args: ', args)
            if fn_key in self._properties.dict.keys():
                val = self._properties.dict[fn_key]
                self.send_retval_to_ub(val, delim="_")
            elif fn_key in self._functions.dict.keys():
                # Execute the function.
                todo = self._functions.dict[fn_key](*args)
                if todo is not None:
                    todo[1]['time'] += time.time()
                    self._queued_commands.update({todo[0]: todo[1]})
            elif fn_key == "Interrupt":
                if len(args) > 0:
                    self._event_ids.append(args[0])
            elif fn_key == "Reset":
                self._functions.dict['reset']()
            else:
                print(fn_key + " not a valid command")

        # Iterate over a copy of the keys so entries can be deleted safely.
        for key in list(self._queued_commands.keys()):
            if time.time() > self._queued_commands[key]['time']:
                self._functions.dict[key](
                    *self._queued_commands[key]['args'])
                del self._queued_commands[key]
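# The hp.parse_args helper used above is not shown; a plausible sketch is a
# converter that turns the remaining string tokens into Python values so they
# can be splatted into the dispatched function. This is an assumption about
# the helper, not its actual implementation.
import ast


def parse_args(tokens):
    """Convert string tokens to ints/floats/bools where possible."""
    parsed = []
    for tok in tokens:
        try:
            parsed.append(ast.literal_eval(tok))  # "3" -> 3, "1.5" -> 1.5
        except (ValueError, SyntaxError):
            parsed.append(tok)  # leave non-literal tokens as strings
    return parsed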
import numpy as np
from matplotlib import pyplot as plt

from helpers import parse_args, timer
from generate_data import generate_x, find_min_max
from visualisation import visualise_x

if __name__ == '__main__':
    filename = 'task_2.log'
    args = parse_args()

    x_msg = f"X generation with N = {args.N} and M = {args.M}"
    X = timer(generate_x, filename, x_msg)(args.M, args.N)

    y_msg = "Finding optimums for X"
    YMin, YMax = timer(find_min_max, filename, y_msg)(X, args.T, args.k)

    for _ in range(args.amount_graphs):
        start = np.random.randint(0, args.N * (args.M - 1))
        visualise_x(X, start, args.N, YMin, YMax)
    plt.legend()
    plt.show()
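# The timer helper above wraps a function so its runtime is logged to a file
# together with a message; a minimal sketch, assuming only the
# timer(func, filename, msg) signature visible at the call sites:
import time
from functools import wraps


def timer(func, filename, msg):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        with open(filename, 'a') as log:
            log.write(f"{msg}: {elapsed:.3f} s\n")
        return result
    return wrapper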
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext import data

import helpers
import model_helpers
import simplelstm

BATCH_SIZE = 32
DEFAULT_ARTICLE_FOLDER = "./articles"
SEED = 0
DEFAULT_TRAIN_DEVICE = 'cpu'

if __name__ == "__main__":
    args = helpers.parse_args()
    ARTICLE_FOLDER = args.input if args.input else DEFAULT_ARTICLE_FOLDER
    TRAIN_DEVICE = args.device if args.device else DEFAULT_TRAIN_DEVICE

    torch.manual_seed(SEED)
    device = torch.device(TRAIN_DEVICE)
    theme_folders = helpers.get_article_themes(ARTICLE_FOLDER)
import sys


def main(parsed_args):
    if 'old' in parsed_args.keys():
        set_global_command(parsed_args['old'])
    else:
        set_global_command('ax64')
    runs, actions, passalong = genruns(parsed_args)
    comment(total_runs=len(runs), actions=actions, passalong=passalong)

    fname = 'runs.txt'
    comment('W {} runs to {}'.format(len(runs), fname))
    pretty_print('W {} runs to {}'.format(len(runs), fname))
    write_runs_to_file(runs, actions, passalong, fname)

    comment('=' * 16, 'running...')
    runs_output, vectors = do_runs(runs, actions, passalong)
    comment('=' * 16, 'done!')

    write_csv('times.csv', 'NO.,TIME', runs_output)
    write_csv('vectors.csv', 'IP,OP,KEY,ROUNDS,IV,LZS', vectors)
    comment('W {} runtimes to times.csv'.format(len(runs_output)))
    pretty_print('W {} runtimes to times.csv'.format(len(runs_output)))
    comment('W {} test vectors written to vectors.csv'.format(len(vectors)))
    pretty_print('W {} test vectors written to vectors.csv'.format(len(vectors)))


if __name__ == '__main__':
    # TODO: wrap this call in a try/except.
    parsed_args = parse_args(sys.argv)
    main(parsed_args)
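# write_csv is assumed to take a filename, a comma-separated header line, and
# an iterable of row sequences; a sketch under that assumption (the real
# helper may format rows differently):
import csv


def write_csv(fname, header, rows):
    with open(fname, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header.split(','))
        writer.writerows(rows)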
import subprocess
import sys

from colors import colors as c

try:
    import requests
    if not hasattr(requests, 'post'):
        raise ImportError
    from decouple import config
except ImportError:
    print(
        f'{c.yellow}WARNING:{c.rst} Some modules are missing, trying to install them:'
    )
    try:
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", 'requests'])
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", 'python-decouple'])
        print(
            f'{c.green}SUCCESS:{c.rst} All required modules were successfully installed.'
        )
        # Exit so the user re-runs the script with the new modules available.
        sys.exit(0)
    except Exception:
        print(f"{c.red}ERROR:{c.rst} Some modules couldn't be installed.")
        sys.exit(1)

import helpers as h
import services as s

if __name__ == "__main__":
    repo = h.parse_args()
    h.check_for_dir(repo)
    new_repo = s.create_github_repo(repo)
    h.create_local_project(new_repo)
    sys.exit(0)
#!/usr/bin/env python
import os

from helpers import parse_args, get_instrument, write, query, ieee_488_2_block_data


def download_file(path, device=None, backend=None):
    dirname, filename = os.path.split(path)
    inst = get_instrument(device, backend=backend)
    # Change to the remote directory, then query the file contents as an
    # IEEE 488.2 block and decode it.
    inst.write('MMEM:CDIR "' + dirname + '"')
    data = inst.query_raw('MMEM:DATA? "' + filename + '"')
    data = ieee_488_2_block_data(data)
    with open(filename, 'wb') as f:
        f.write(data)


if __name__ == "__main__":
    def add_more_args(parser):
        parser.add_argument('--filename')

    args = parse_args(add_more_args)
    download_file(args.filename, args.device, args.backend)
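# parse_args(add_more_args) suggests a shared parser that each script extends
# through a callback; a minimal sketch under that assumption (--device and
# --backend are inferred from the attributes used above):
import argparse


def parse_args(add_more_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--device')
    parser.add_argument('--backend')
    if add_more_args is not None:
        add_more_args(parser)  # let the caller register extra flags
    return parser.parse_args()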
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

For more information please read the README file. The files can also be
found at: https://github.com/intact-project/ild-cnn
'''

import helpers as H
import cnn_model as CNN

# debug
from ipdb import set_trace as bp

# initialization
args = H.parse_args()  # parse command-line arguments

train_params = {
    'do': float(args.do) if args.do else 0.5,        # Dropout Parameter
    'a': float(args.a) if args.a else 0.3,           # Conv Layers LeakyReLU alpha param [if alpha set to 0 LeakyReLU is equivalent with ReLU]
    'k': int(args.k) if args.k else 4,               # Feature maps k multiplier
    's': float(args.s) if args.s else 1,             # Input Image rescale factor
    'pf': float(args.pf) if args.pf else 1,          # Percentage of the pooling layer: [0,1]
    'pt': args.pt if args.pt else 'Avg',             # Pooling type: Avg, Max
    'fp': args.fp if args.fp else 'proportional',    # Feature maps policy: proportional, static
    'cl': int(args.cl) if args.cl else 5,            # Number of Convolutional Layers
    'opt': args.opt if args.opt else 'Adam',         # Optimizer: SGD, Adagrad, Adam
    'obj': args.obj if args.obj else 'ce',           # Minimization Objective: mse, ce
    'patience': args.pat if args.pat else 200,       # Patience parameter for early stopping
    'tolerance': args.tol if args.tol else 1.005,    # Tolerance parameter for early stopping [default: 1.005, checks if > 0.5%]
    'res_alias': args.csv if args.csv else 'res'     # CSV results filename alias
}
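# The `value if value else default` fallbacks above can be folded into the
# parser itself with argparse defaults; a hedged sketch (flag names mirror the
# keys above, the real H.parse_args may differ):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--do', type=float, default=0.5, help='Dropout Parameter')
parser.add_argument('--a', type=float, default=0.3, help='LeakyReLU alpha')
parser.add_argument('--cl', type=int, default=5, help='Number of Conv Layers')
# ...the remaining flags follow the same pattern, which makes the
# `if args.x else` fallbacks unnecessary.
args = parser.parse_args()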
def main(args):
    print(args)
    problem_ids, episodes, grid = parse_args(args)
    for problem_id in problem_ids:
        # This seed doesn't work; if needed, change seed to True below.
        random_agent = RandomAgent(problem_id=problem_id, map_name_base=grid)
        random_agent.solve(episodes=episodes, seed=None)
        random_agent.evaluate(episodes)

        simple_agent = SimpleAgent(problem_id=problem_id, map_name_base=grid)
        simple_agent.solve(episodes=episodes)
        simple_agent.evaluate(episodes)

        rl_agent = ReinforcementLearningAgent(problem_id=problem_id,
                                              map_name_base=grid)
        rl_agent.solve(episodes=episodes)
        rl_agent.evaluate(episodes)

        passive_agent = PassiveAgent(problem_id=problem_id,
                                     map_name_base=grid)
        passive_agent.solve()
        passive_agent.evaluate(episodes)

        # Add the plots for evaluation.
        labels = ['Episodes', 'Mean Reward']
        agents = {
            'random': random_agent,
            'simple': simple_agent,
            'rl': rl_agent
        }
        title = 'Problem {}. Episodes vs Mean Reward Plot'.format(problem_id)

        filename = '{}_{}_first_1000_training'.format(problem_id,
                                                      random_agent.env.ncol)
        subtitle = 'First 1000 Episodes vs Mean Reward (Training Phase)'
        plot_eval(agents, range(1000), labels, filename, title, subtitle)

        filename = '{}_{}_training'.format(problem_id, random_agent.env.ncol)
        subtitle = 'Episodes Number vs Mean Reward (Training Phase)'
        plot_eval(agents, range(episodes), labels, filename, title, subtitle)

        filename = '{}_{}_first_1000_evaluation'.format(
            problem_id, random_agent.env.ncol)
        subtitle = 'First 1000 Episodes vs Mean Reward (Evaluation Phase)'
        plot_eval(agents, range(1000), labels, filename, title, subtitle,
                  training=False)

        filename = '{}_{}_evaluation'.format(problem_id,
                                             random_agent.env.ncol)
        subtitle = 'Episodes Number vs Mean Reward (Evaluation Phase)'
        plot_eval(agents, range(episodes), labels, filename, title, subtitle,
                  training=False)

        compare_utils(passive_agent.U, rl_agent.U)
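# compare_utils is assumed to diff the utility tables of the passive and RL
# agents; a minimal sketch, assuming each U maps states to utility values
# (the real helper may report the comparison differently):
def compare_utils(passive_U, rl_U):
    """Print the per-state gap between two utility tables."""
    for state in sorted(set(passive_U) & set(rl_U)):
        diff = rl_U[state] - passive_U[state]
        print('state {}: passive={:.3f} rl={:.3f} diff={:+.3f}'.format(
            state, passive_U[state], rl_U[state], diff))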