def main():
    """Entry point: parse CLI arguments, resolve the host, and start pinging.

    Accepts the target as ``host`` or ``host:port``; the port defaults to 80.
    """
    parser = create_parser()
    args = parser.parse_args(sys.argv[1:])

    # Split once instead of three times.
    parts = args.domain.split(':')
    domain = parts[0]
    # BUG FIX: the original left an explicitly supplied port as a *string*
    # ("host:8080" -> '8080') while the default was the int 80, handing an
    # inconsistent type to ping(). Convert to int in both cases.
    port = int(parts[1]) if len(parts) == 2 else 80

    timeout_ms = float(args.timeout)
    count = int(args.count)
    host = socket.gethostbyname(domain)
    ping(host, port, timeout_ms, count)
def main():
    """Main function. Entry point of the application.

    Parses CLI arguments, trains an MLP classifier on the first half of the
    sklearn digits dataset, prints evaluation metrics, and opens an
    interactive matrix-input window that classifies user-drawn digits.
    """
    parser = create_parser()
    namespace = parser.parse_args()  # get arguments from parser
    learning_rate = namespace.lr

    data_set = datasets.load_digits()
    full_amount = len(data_set.images)
    train_amount = full_amount // 2

    # Validate user input before doing any expensive work.
    # (FIX: messages read "You value" in the original — a typo.)
    if learning_rate < 0 or learning_rate > 1:
        print(
            "Learning rate must be in interval from 0 to 1 \nYour value is: {}".
            format(learning_rate))
        sys.exit()
    if train_amount < 1:
        print("Train amount must be greater than 1 \nYour value is: {}".format(
            train_amount))
        sys.exit()

    # To apply a classifier on this data, we need to flatten the image, to
    # turn the data in a (samples, feature) matrix:
    data = data_set.images.reshape((full_amount, -1))

    # Create a classifier: a multi-layer perceptron.
    # (FIX: the original comment wrongly called this a support vector
    # classifier — it is an MLPClassifier.)
    classifier = MLPClassifier(solver='sgd',
                               activation='logistic',
                               hidden_layer_sizes=(64, ),
                               learning_rate='constant',
                               learning_rate_init=learning_rate,
                               verbose=True)

    # We learn the digits on the first half of the digits
    classifier.fit(data[:train_amount], data_set.target[:train_amount])

    # Now predict the value of the digit on the second half:
    expected = data_set.target[train_amount:]
    predicted = classifier.predict(data[train_amount:])

    print("Classification report for classifier %s:\n%s\n" %
          (classifier, metrics.classification_report(expected, predicted)))
    print("Confusion matrix:\n%s" %
          metrics.confusion_matrix(expected, predicted))

    def classify(matrix):
        # Closure over the trained classifier for the GUI callback.
        return classifier.predict(matrix)

    matrix_input = MatrixInput()
    matrix_input.show(classify)
def main() -> None:
    """Parse CLI args and run the UDP message-printer server until interrupted."""
    parser = create_parser()
    args = parser.parse_args()
    logger.debug(f"Running with args: n: {args.n}, s: {args.s}, r: {args.r}")
    logger.info(f"Starting UDP server listening on: {APP_HOST}:{APP_PORT}")

    # BUG FIX: the uvloop policy must be installed *before* the loop is
    # obtained. The original called get_event_loop() first, so the policy
    # never took effect and a default asyncio loop was used.
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = asyncio.get_event_loop()

    transport, protocol = loop.run_until_complete(
        loop.create_datagram_endpoint(
            lambda: MessagePrinterServerProtocol(args.n, args.s, args.r),
            local_addr=(APP_HOST, APP_PORT),
        ))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass  # Ctrl-C is the normal shutdown path.
    finally:
        # Always release the socket and the loop, even on errors.
        transport.close()
        loop.close()
def main():
    """Scan movie dirs, rate them via online DBs, and render an HTML report.

    Honors --force (skip cache), --histogram (embed a ratings histogram)
    and --run (open the report in the default browser via xdg-open).
    """
    logging.basicConfig(level=logging.DEBUG)

    # Parser and args
    parser = create_parser()
    args = parser.parse_args()

    # Setup resources and dirs
    dest = open(args.out, 'w')
    res_dir = os.path.split(os.path.abspath(__file__))[0]
    # FIX: read the template through a context manager — the original
    # leaked the file handle (open(...).read() with no close()).
    with open(os.path.join(res_dir, 'template.html'), 'r') as template_file:
        template = template_file.read()
    output = HTMLOutput(dest, template)
    cache_dir = os.path.split(args.out)[0]

    # Use cache
    dbs = [FilmwebDatabase()]
    if not args.force:
        cache = load_cache(cache_dir, args.out)
        if cache:
            logging.info("using cache file")
            dbs = cache

    # Get movies
    movies = find_movies_info(args.dirs, dbs, output, '-rating')

    # Histogram?
    if args.histogram:
        path = os.path.join(cache_dir, '.movierank-histogram.png')
        histogram(movies, path)
        output.add_extra('histogram', path)

    # Finish
    store_cache(cache_dir, dbs, suffix=args.out)
    output.flush()
    # FIX: close the report file (leaked in the original) before the
    # browser is asked to read it.
    dest.close()

    # Run browser?
    if args.run:
        subprocess.Popen(["xdg-open", args.out],
                         stderr=subprocess.STDOUT,
                         stdout=subprocess.PIPE)
def main():
    """Run the console ball game until it ends, then record the player's score."""
    parser = create_parser()
    game_field = ConsoleField(parser.size)
    controller = ConsoleController(
        game_field,
        ScoreBoard.load_from(parser.records, parser.hint_mode),
        parser.debug)
    # NOTE(review): a *fresh* ConsoleField is built here rather than reusing
    # game_field — presumably intentional, but worth confirming.
    BallGenerator.place_balls(
        ConsoleField(parser.size), controller,
        BallGenerator.generate_balls(10, parser.debug, False))
    cls()
    print("""Hello there!
to start playing print 'start'
use 'help' to get other commands
Good luck!""")

    # Main command loop: keep reading commands until the game reports it is over.
    while not controller.is_over:
        try:
            run_command(controller)
        except (KeyboardInterrupt, EOFError):
            print("\nTo stop the game type 'exit'")

    # Keep prompting until a valid player name is accepted by the score table.
    while True:
        cls()
        print("""Game Over
You scored {} points
Please input your name
""".format(controller.current_score))
        try:
            controller.score_table.set_name(input())
            break
        except ValueError as err:
            print(err)
    controller.score_table.save()
def build_classifier(args, tasker):
    """Build the downstream classifier sized to match the encoder's output.

    Node-classification tasks score a single node embedding (mult == 1);
    every other task concatenates two node embeddings (mult == 2).
    Returns an mls.Classifier moved to args.device.
    """
    if 'node_cls' == args.task or 'static_node_cls' == args.task:
        mult = 1
    else:
        mult = 2
    if 'gru' in args.model or 'lstm' in args.model:
        # Recurrent variants emit lstm_l2_feats features per node.
        in_feats = args.gcn_parameters['lstm_l2_feats'] * mult
    elif args.model == 'skipfeatsgcn' or args.model == 'skipfeatsegcn_h':
        # Skip-feature variants append the raw node features to layer-2 output.
        in_feats = (args.gcn_parameters['layer_2_feats'] +
                    args.gcn_parameters['feats_per_node']) * mult
    else:
        in_feats = args.gcn_parameters['layer_2_feats'] * mult

    return mls.Classifier(args, in_features = in_feats,
                          out_features = tasker.num_classes).to(args.device)


if __name__ == '__main__':
    parser = u.create_parser()
    args = u.parse_args(parser)
    # NOTE(review): `global` at module level is a no-op — presumably meant to
    # pre-declare names assigned inside the try below.
    global rank, wsize, use_cuda
    # Only use CUDA when both requested and actually available.
    args.use_cuda = (torch.cuda.is_available() and args.use_cuda)
    args.device = 'cpu'
    if args.use_cuda:
        args.device = 'cuda'
    print("use CUDA:", args.use_cuda, "- device:", args.device)
    try:
        # Distributed setup over MPI; rank/world size identify this worker.
        dist.init_process_group(backend='mpi')  # , world_size=4
        rank = dist.get_rank()
        wsize = dist.get_world_size()
        print('Hello from process {} (out of {})'.format(dist.get_rank(),
                                                         dist.get_world_size()))
        # NOTE(review): source chunk is truncated below this point.
        if args.use_cuda:
def __init__(self):
    """Create the client's TCP transport and read connection settings.

    Attributes set: ``transport`` (an IPv4 stream socket), ``addr`` and
    ``port`` (unpacked from the command-line parser).
    """
    # IPv4/TCP socket used as the transport for this client.
    tcp_socket = socket(AF_INET, SOCK_STREAM)
    self.transport = tcp_socket
    # create_parser() is expected to yield an (address, port) pair.
    self.addr, self.port = create_parser()
    # NOTE(review): this is the tail of a benchmarking routine whose header
    # (and the preceding `time_a = ...` for the fit timing) lies outside this
    # chunk; timings rows 2/3 record fit/score wall-clock per iteration i.
    classifier.fit(X_training_transform, Y_training)
    time_b = time.perf_counter()
    timings[2, i] = time_b - time_a

    # -- test --------------------------------------------------------------
    time_a = time.perf_counter()
    results[i] = classifier.score(X_test_transform, Y_test)
    time_b = time.perf_counter()
    timings[3, i] = time_b - time_a

    return results, timings


if __name__ == '__main__':
    arguments = create_parser()
    if arguments.update_data:
        # print(arguments.if_split)
        update_data(True)

    # Dataset sizes to benchmark against.
    dataset_range = \
        (
            50,
            100,
            200,
            300,
            400,
            500
        )
    # NOTE(review): source chunk is truncated mid-statement below.
    results_dataset = pd.DataFrame(index=dataset_range,
import sys
from utils import create_parser, post_servicecall
from createcalls import run_call, run_av_check, run_new_task
import constants

# Invoke the command-line handler.
parser = create_parser()
namespace = parser.parse_args(sys.argv[1:])
print(namespace)

# Dispatch the parsed sub-command to its handler; unknown commands are
# ignored, exactly as the original if/elif chain (which had no else) did.
_handlers = {
    'phonecall': run_call,
    'avcheck': run_av_check,
    'newticket': run_new_task,
}
_handler = _handlers.get(namespace.command)
if _handler is not None:
    _handler(namespace)
    # NOTE(review): tail of a ROC-plotting routine whose header (and the
    # computation of roc_auc) lies outside this chunk.
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC of LSTM anomaly detector')
    plt.legend(loc="lower right")
    plt.show()
    return roc_auc


if __name__ == '__main__':
    import argparse
    import sys
    import traceback
    try:
        args = utils.create_parser()
        # Pin every RNG and disable cuDNN autotuning for reproducible runs.
        np.random.seed(0)
        torch.manual_seed(0)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        if args.debug:
            print(args)
        # Either train a new detector or evaluate an existing one.
        if args.training:
            train(args)
        else:
            eval_detector(args)
    # NOTE(review): source chunk is truncated below this point.
    except SystemExit:
def __init__(self, user='******'):
    """Set up the TCP connection socket and the account identity.

    :param user: account name presented to the server (masked default).
    Attributes set: ``connection``, ``account_name``, ``addr``, ``port``.
    """
    # IPv4/TCP socket for the client connection.
    conn = socket(AF_INET, SOCK_STREAM)
    self.connection = conn
    self.account_name = user
    # create_parser() supplies the (address, port) pair from the CLI.
    self.addr, self.port = create_parser()
import skimage
import skimage.io
import transforms3d
import math
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import random
import utils
import models.architectures as architectures
from data.load_ops import resize_rescale_image
from data.load_ops import rescale_image
import utils  # NOTE(review): duplicate import of utils
import lib.data.load_ops as load_ops
import task_viz

# CLI parser for the single-task visualisation script.
parser = utils.create_parser("Viz Single Task")

tf.logging.set_verbosity(tf.logging.ERROR)

# Whitespace-separated catalogue of every supported task name.
list_of_tasks = 'autoencoder curvature denoise edge2d edge3d \
                 keypoint2d keypoint3d colorization jigsaw \
                 reshade rgb2depth rgb2mist rgb2sfnorm \
                 room_layout segment25d segment2d vanishing_point \
                 segmentsemantic class_1000 class_places inpainting_whole'
list_of_tasks = list_of_tasks.split()


def prepare_image(task, im_name, cfg):
    """Load, center-crop and float-normalise an image, overwriting it on disk.

    NOTE(review): definition appears truncated at the end of this chunk.
    """
    img = task_viz.load_raw_image_center_crop(im_name)
    img = skimage.img_as_float(img)
    # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2, and
    # `scipy`/`np` are not imported in this chunk — confirm the pinned SciPy
    # version and the file's full import list.
    scipy.misc.toimage(np.squeeze(img), cmin=0.0, cmax=1.0).save(im_name)
import scipy
import skimage
import skimage.io
import transforms3d
import math
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import random
import utils
import models.architectures as architectures
from data.load_ops import resize_rescale_image
from data.load_ops import rescale_image
import utils  # NOTE(review): duplicate import of utils
import lib.data.load_ops as load_ops

# CLI parser for the multiple-task visualisation script.
parser = utils.create_parser("Viz Multiple Task")

tf.logging.set_verbosity(tf.logging.ERROR)

# Whitespace-separated catalogue of the supported multi-image tasks.
list_of_tasks = 'ego_motion \
                 fix_pose \
                 non_fixated_pose \
                 point_match'
list_of_tasks = list_of_tasks.split()


def run_to_task():
    """Run visualisation for the configured tasks.

    NOTE(review): definition is truncated at the end of this chunk.
    """
    import general_utils
    from general_utils import RuntimeDeterminedEnviromentVars
    tf.logging.set_verbosity(tf.logging.ERROR)