def find_top(args):
    rargs = util.parse_arguments(['--use-installed-pkgs', '--find-top'] + args)
    n = args[-1]
    libinstall = join(get_fastr_repo_dir(), "top%s.tmp" % n)
    if not os.path.exists(libinstall):
        os.mkdir(libinstall)
    os.environ['R_LIBS_USER'] = libinstall
    _installpkgs(rargs)
def main():
    arguments = parse_arguments()
    initialize_distributed_backend(arguments)
    download_data(arguments)
    model = allocate_model()
    model = torch.nn.parallel.DistributedDataParallelCPU(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=arguments.lr, momentum=arguments.momentum)
    worker_procedure(arguments, model, optimizer)
async def pc(self, ctx: commands.Context, *args):
    url = 'Z0DO0XyS8Ko'
    if ctx.message.id % 13 == 0:
        url = 'OzGVz1ClxIc'
    opt, args = parse_arguments(args)
    await self.music_handler.play_song(ctx, url, opt=opt, metadata={
        'title': 'Sandu Ciorba - Pe cimpoi',
        'duration': 128
    })
def main():
    arguments = parse_arguments()
    initialize_distributed_backend(arguments)
    download_data(arguments)
    model = allocate_model()
    optimizer = GEM(model.parameters(), lr=arguments.lr, momentum=arguments.momentum)
    # optimizer = DOWNPOUR(model.parameters(), lr=arguments.lr)
    if is_master():
        master_procedure(arguments, model, optimizer)
    else:
        worker_procedure(arguments, model, optimizer)
    optimizer.ready()
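# The snippet above assumes an is_master() helper. A minimal sketch of such a
# helper under the usual torch.distributed convention (rank 0 acts as the
# master); the project's actual implementation may differ.
import torch.distributed as dist

def is_master():
    # Requires that initialize_distributed_backend() has already called
    # dist.init_process_group(...).
    return dist.get_rank() == 0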
async def pcc(self, ctx: commands.Context, *args):
    url = '3H6QaUYVsVM'
    if ctx.message.id % 13 == 0:
        url = 'g1p5eNOsl7I'
    opt, args = parse_arguments(args)
    await self.music_handler.play_song(
        ctx, url, opt=opt, metadata={
            'title': 'My friends and I thought making a cover of Pe Cimpoi was a good idea but it was not',
            'duration': 133
        })
    for i in range(0, iterations):
        start = time.time()
        for j in range(0, 5):
            runAlgo(set1[counter], set2[counter])
            counter += 1
        end = time.time()
        print("Iteration: " + str(i) + " is complete")
        lengths.append(length)
        values.append((end - start) / 5)
        length = int(length * 1.3)
    return lengths, values


if __name__ == "__main__":
    args = sys.argv
    score_matrix, gap_cost, should_output_allignment, alphabet, fastaSeq1, fastaSeq2 = util.parse_arguments(args)
    A = next(iter(fastaSeq1.values())).replace(" ", "")
    B = next(iter(fastaSeq2.values())).replace(" ", "")
    print(A)
    print(B)
    n = len(A)
    m = len(B)
    print(runAlgo(A, B, s_mat=score_matrix, gc=gap_cost))
    # T = np.empty([n+1, m+1])
    # T[:] = float("inf")
    # print(np.dtype(T[0,0]))
    # res = cost(n,m)
    # TODO: if should_output_allignment == 1 -> print an optimal alignment
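# A minimal sketch (not the project's runAlgo/cost) of the DP table fill that the
# commented-out lines above hint at: an (n+1) x (m+1) table initialised to inf,
# filled with a linear gap cost `gc` and substitution costs looked up in `s_mat`.
# Treating s_mat as a dict-of-dicts keyed by characters and minimising total cost
# are assumptions, not facts about this codebase.
import numpy as np

def fill_table_sketch(A, B, s_mat, gc):
    n, m = len(A), len(B)
    T = np.empty([n + 1, m + 1])
    T[:] = float("inf")
    T[0, 0] = 0.0
    for i in range(1, n + 1):
        T[i, 0] = T[i - 1, 0] + gc              # gap in B
    for j in range(1, m + 1):
        T[0, j] = T[0, j - 1] + gc              # gap in A
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            T[i, j] = min(T[i - 1, j - 1] + s_mat[A[i - 1]][B[j - 1]],  # substitution
                          T[i - 1, j] + gc,                            # gap in B
                          T[i, j - 1] + gc)                            # gap in A
    return T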
    mOutput = np.vstack([mOutput, m])
    if mOutput.shape[0] != data.shape[0]:
        print 'Invalid results'
        exit(1)
    return mOutput


def removeFile(sFile):
    try:
        os.remove(sFile)
    except Exception:
        pass


if __name__ == '__main__':
    arguments = util.parse_arguments([x for x in sys.argv[1:]])
    if (not arguments.has_key('ark_file')) or (not arguments.has_key('wdir')) \
            or (not arguments.has_key('output_file_prefix')) or (not arguments.has_key('model_file')) \
            or (not arguments.has_key('deeplearn_path')):
        print "Error: the mandatory arguments are: --deeplearn-path --ark-file --wdir --output-file-prefix --model-file"
        exit(1)

    # mandatory arguments
    ark_file = arguments['ark_file']
    wdir = os.path.abspath(arguments['wdir'])
    output_file_prefix = arguments['output_file_prefix']
    sModelFile = arguments['model_file']
    sDeeplearnPath = arguments['deeplearn_path']

    # paths for output files
batch["image"] = batch["image"].cuda(self.device) target = batch["target"][0].numpy() pred = self.forward(batch)[0].squeeze().cpu().numpy() draw = ImageDraw.Draw(local_image) draw.rectangle([(0, 0), (63, 63)], outline=(255, 255, 255)) draw.rectangle([(max(pred[1] - data_box_dim, 0), max(pred[0] - data_box_dim, 0)), (min(pred[1] + data_box_dim, 64), min(pred[0] + data_box_dim, 64))], outline=(255, 0, 0)) draw.rectangle([(max(target[1] - data_box_dim, 0), max(target[0] - data_box_dim, 0)), (min(target[1] + data_box_dim, 64), min(target[0] + data_box_dim, 64))], outline=(0, 255, 0)) image.paste(local_image, ((t // 3) * 64, (t % 3) * 64)) if t == 9: break image.save(output_vis_path) if __name__ == '__main__': from util import parse_arguments from util import NestedTensorboardLogger from pytorch_lightning import Trainer from pytorch_lightning.callbacks import ModelCheckpoint import os args = parse_arguments() logger = NestedTensorboardLogger(save_dir=os.path.join("runs", args.dataset), name=args.experiment) checkpoint_callback = ModelCheckpoint(filepath=os.path.join("runs", args.dataset, args.experiment, 'checkpoints'), save_top_k=-1, verbose=False, period=args.save_epoch) model = MNISTRegressionTrainer(args) trainer = Trainer(gpus=args.gpu, early_stop_callback=None, num_sanity_val_steps=args.sanity_steps, checkpoint_callback=checkpoint_callback, max_epochs=args.max_epoch, limit_val_batches=args.val_check_percent, val_check_interval=min(args.val_check_interval, 1.0), check_val_every_n_epoch=max(1, args.val_check_interval), resume_from_checkpoint=args.resume, distributed_backend=args.distributed_backend, logger=logger) trainer.fit(model)
import os
import socket
import traceback
import util

from datetime import datetime
from messages import MessageParameters
from psycopg2 import IntegrityError

args = util.parse_arguments()

interval_map = {
    5: '5min',
    15: '15min',
    60: '60min',
    240: '4hour',
    '1d': '1day',
    '7d': '1week'
}
require_date_format = {'1day', '1week'}
error_messages_set = {"E,!NO_DATA!,,", "S,SERVER DISCONNECTED"}


def get_earliest_data_time(cursor, ticker_id, interval, start_time):
    if start_time:
        return start_time
    cursor.execute(
        "SELECT end_time FROM stocks_stock{0} WHERE ticker_id={1} ORDER BY end_time LIMIT 1"
        .format(interval, ticker_id))
    data = cursor.fetchall()
    if not data or not data[0]:
        return ''
    return str(data[0][0] if interval in
        rc = k + len(phoneToPdfId[i])
        majorVotesHard[predicts == rc, 1 + c] += 1
        majorVotesSoft[:, 1 + c] += result[:, 2 + rc]
    majorVotesHard[:, 0] = majorVotesHard[:, 1:].argmax(1)
    majorVotesSoft[:, 0] = majorVotesSoft[:, 1:].argmax(1)
    np.savetxt(majorVotesHardFile, majorVotesHard, fmt='%f', delimiter=',')
    np.savetxt(majorVotesSoftFile, majorVotesSoft, fmt='%f', delimiter=',')
    return (majorVotesHard, majorVotesSoft)


################################################################################

if __name__ == '__main__':
    arguments = util.parse_arguments([x for x in sys.argv[1:]])
    if ((not arguments.has_key('ark_file')) or (not arguments.has_key('wdir'))
            or (not arguments.has_key('output_file_prefix'))
            or (not arguments.has_key('weight_file'))
            or (not arguments.has_key('model_files'))
            or (not arguments.has_key('deeplearn_path'))):
        print "Error: the mandatory arguments --deeplearn-path --model-files --major-vote-prob --ark-file --weight-file --wdir --output-file-prefix"
        exit(1)

    # mandatory arguments
    ark_file = arguments['ark_file']
    wdir = os.path.abspath(arguments['wdir'])
    output_file_prefix = arguments['output_file_prefix']
    sWeightFile = arguments['weight_file']
async def wap(self, ctx: commands.Context, *args):
    url = 'hsm4poTWjMs'
    opt, args = parse_arguments(args)
    await self.music_handler.play_song(ctx, url, opt=opt)
async def _help(self, ctx: commands.Context, *args):
    url = 'yD2FSwTy2lw'
    opt, args = parse_arguments(args)
    await self.music_handler.play_song(ctx, url, opt=opt)
async def rip(self, ctx: commands.Context, *args):
    url = ' '.join(args) + ' siivagunner'
    opt, args = parse_arguments(args)
    await self.music_handler.play_song(ctx, url, opt=opt)
            squares.append(board[i*w:(i+1)*w, j*h:(j+1)*h])
            names.append(ranks[j]+files[7-i])
    squares = np.array(squares)
    squares = squares.reshape(squares.shape[0], 64, 64, 1)
    return squares, names


if __name__ == "__main__":
    # Extract all squares from all boards
    print("Extracting squares...")

    # board_dir = "../data/boards/"
    # square_dir = "../data/squares/"
    (_, board_dir, square_dir) = parse_arguments()

    board_filenames = listdir_nohidden(board_dir)
    filenames = [f for f in board_filenames]
    board_imgs = [cv2.imread(board_dir+f, 0) for f in filenames]

    for f, b in zip(filenames, board_imgs):
        squares, names = extract_squares(b)
        for sq, name in zip(squares, names):
            # print("{}{}".format(name, sq.shape))
            # cv2.imwrite(square_dir + name + f, sq)
            pass

    print("\rExtracting squares...DONE")
f"accuracy: {val_accuracy}, test accuracy: {current_test_accuracy}" ) metrics.append((total_loss / n_train_batches, sum(val_losses) / n_val_batches, val_accuracy, current_test_accuracy)) # for plotting learning curve print(f"Total training time: {(time.time() - start_ts):.2f}s") model.save_experiment(saved_metrics, metrics) print("Finished training") if __name__ == '__main__': batch_size, epochs, num_workers, test_split, valid_split, test_and_plot, pre_trained = parse_arguments( ) # ROOT_PATH = Path('/home/diego/Documents/RUG/CognitiveRobotics/Grasping_Detection_System') # PATH_TO_DATA = ROOT_PATH / 'debug_dataset' ROOT_PATH = Path('/home/s3736555/Grasping_Detection_System') PATH_TO_DATA = ROOT_PATH / 'dataset' PATH_TO_POS_LABELS = ROOT_PATH / 'labels/pos_labels.csv' PATH_TO_OUTPUTS = ROOT_PATH / 'output' # Make sure output exists if not PATH_TO_OUTPUTS.exists(): Path.mkdir(PATH_TO_OUTPUTS, parents=True) BATCH_SIZE = batch_size NUM_WORKERS = num_workers TEST_SPLIT = test_split VALIDATION_SPLIT = valid_split RANDOM_SEED = 42 PRE_TRAINED = pre_trained
async def bd(self, ctx: commands.Context, *args):
    url = 'i63cgUeSsY0'
    opt, args = parse_arguments(args)
    await self.music_handler.play_song(ctx, url, opt=opt)
from __future__ import division, print_function, absolute_import

import os
import tensorflow as tf

import dataset
import network
import util

slim = tf.contrib.slim

print('Setting up run')
nc, dc, rc = util.parse_arguments()
run_name = util.run_name(nc, dc, rc)

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('checkpoint_dir', 'checkpoint/' + run_name + '/',
                    'output directory for model checkpoints')
flags.DEFINE_string('summary_dir', 'logs/' + run_name,
                    'output directory for training summaries')
flags.DEFINE_float('gamma', 0.5, 'learning rate change per step')
flags.DEFINE_float('learning_rate', 0.03, 'learning rate change per step')

dataset_names = ['freefield1010', 'warblr']

if not tf.gfile.Exists(FLAGS.checkpoint_dir):
    print('Making checkpoint dir')
    os.makedirs(FLAGS.checkpoint_dir)

if not tf.gfile.Exists(FLAGS.summary_dir):
    print('Making summary dir')
async def dmc(self, ctx: commands.Context, *args):
    url = 'RofLs15xbaE'
    opt, args = parse_arguments(args)
    await self.music_handler.play_song(ctx, url, opt=opt)
        mean_accuracy = (total_correct / float(total_seen))
        mean_class_accuracy = np.mean(total_correct_class / total_seen_class)
        print('Mean loss : %f' % mean_loss)
        print('Mean accuracy : %f' % mean_accuracy)
        print('Avg class accuracy : %f' % mean_class_accuracy)
        train_writer.add_summary(summary)

        if epoch % snapshot_interval == 0 or epoch == max_epoch - 1:
            snapshot_file = "./{}_snapshot_{}.tf".format(model_name, epoch)
            saver.save(session, snapshot_file)
            print('Model %s saved' % snapshot_file)

        global_step_val, learning_rate_val = session.run([global_step, learning_rate])
        print('Global step : ', global_step_val)
        print('Learning rate : ', learning_rate_val)

        # the with-statement closes the file, so no explicit close() is needed
        with open("train_info_{}.txt".format(model_name), 'a') as f:
            f.write(str(datetime.datetime.now().strftime("%c")) + ', ' +
                    str(epoch) + ', ' + str(mean_loss) + ', ' +
                    str(mean_accuracy) + ', ' + str(mean_class_accuracy) + '\n')

        d.next_epoch()


if __name__ == "__main__":
    args = util.parse_arguments("../param.json")
    train(args)
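# util.parse_arguments is called with a JSON file here rather than sys.argv.
# A hypothetical sketch of such a helper (the real one lives in util.py and may
# differ): load the JSON parameters and expose them with attribute access.
import argparse
import json

def parse_arguments_sketch(param_file):
    with open(param_file) as f:
        params = json.load(f)
    return argparse.Namespace(**params)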
async def fri(self, ctx: commands.Context, *args):
    url = 'kfVsfOSbJY0'
    opt, args = parse_arguments(args)
    await self.music_handler.play_song(ctx, url, opt=opt)
def main():
    arguments = parse_arguments()
    insert()
    crawl(arguments)
from openpyxl import load_workbook

import util
import config


def autofill_excel(input_file, show_brand, scb):
    sheet = load_workbook(input_file)
    wb = sheet[sheet.get_sheet_names()[0]]
    row = 2
    while wb[config.price + str(row)].value is not None:
        util.copy_description(wb, row)
        util.collect_names(wb, row, show_brand, scb)
        util.cell_to_null(wb, row, config.link)
        util.cell_to_null(wb, row, 'T')
        util.show_on_site(wb, row)
        row += 1
    sheet.save(input_file[:-5] + '(1).xlsx')


if __name__ == '__main__':
    autofill_excel(*util.parse_arguments())
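# autofill_excel(*util.parse_arguments()) implies the helper returns an
# (input_file, show_brand, scb) tuple. A hypothetical sketch of such a parser;
# the flag names below are illustrative and not taken from this project.
import argparse

def parse_arguments_sketch():
    parser = argparse.ArgumentParser(description='Autofill an .xlsx price list')
    parser.add_argument('input_file', help='path to the source .xlsx file')
    parser.add_argument('--show-brand', action='store_true')
    parser.add_argument('--scb', action='store_true')
    ns = parser.parse_args()
    return ns.input_file, ns.show_brand, ns.scb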
Example:
    draw_circle.py out.kml "48 51 29.1348N,2 17 40.8984E"
"""
import docopt
import polycircles.polycircles

import util

__author__ = 'peter'

args = docopt.docopt(__doc__)
args, kml = util.parse_arguments(args)

print u'Drawing circle for coordinates {0}...'.format(args['<coords>'])
lat, lon = util.parse_latlon(args['<coords>'])
circle = polycircles.polycircles.Polycircle(lat, lon, float(args['<radius>']),
                                            int(args['<sides>']) or 20)
p = kml.newpolygon(name=args['--name'], outerboundaryis=circle.to_kml())

print u'Setting colour...'
p.style.polystyle.color = util.parse_color(args['--colour'])

if '--description' in args:
    print u'Setting description...'
    p.description = args['--description']

print u'Saving to {0}...'.format(args['<outfile>'])
from rename_images import rename_images
from resize_images import resize_images
from util import parse_arguments

"""Renames the source images and saves resized copies in the output directory"""

# Usage:
# python chessvision/data_processing/process_new_raw.py -d data/new_raw -o data/board_extraction/new_raw_resized

_, indir, outdir = parse_arguments()

rename_images(indir)
resize_images(indir, outdir)
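# The usage comment above shows -d and -o flags, and the call unpacks three
# values (the first ignored). A hypothetical sketch of a matching parse_arguments;
# the real helper lives in util.py and may differ.
import argparse

def parse_arguments_sketch():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--indir', required=True, help='directory with raw board images')
    parser.add_argument('-o', '--outdir', required=True, help='output directory for resized copies')
    ns = parser.parse_args()
    return ns, ns.indir, ns.outdir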