Пример #1
0
 def init_data_test(self):
     """
     Prepare the default test fixtures: result object, steps and logger.

     The test name is derived from the file that defines the concrete
     test class. Override this method in a subclass when a test case
     needs extra test data beyond these defaults.
     """
     source_file = inspect.getfile(self.__class__)
     test_name, _ = os.path.splitext(os.path.basename(source_file))
     self.test_result = TestResult(test_name)
     self.steps = Steps()
     self.logger = Logger(test_name)
Пример #2
0
 def __init__(self, test_name):
     """
     Initialize the per-test state shared by all sub test classes.

     Sub test classes must invoke this first through super().__init__
     so the result object, steps and logger exist before the test runs.

     :param test_name: name of test case.
     """
     # Result container, step tracker and logger are all keyed on the
     # test name supplied by the concrete subclass.
     self.test_result = TestResult(test_name)
     self.steps = Steps()
     self.logger = Logger(test_name)
Пример #3
0

def gpu_memory_in_use(gpu='/device:GPU:0', log=None):
    """
    Return the number of bytes of GPU memory currently in use.

    :param gpu: TF device string identifying the GPU to query.
    :param log: optional logger; when given, the value is also printed.
    :return: bytes currently allocated on the device.
    """
    # TF1 contrib op reporting the allocator's bytes currently in use.
    from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesInUse
    with tf.device(gpu):
        bytes_in_use = BytesInUse()
    # Use the compat.v1 session API, consistent with the rest of this file.
    with tf.compat.v1.Session() as sess:
        val = sess.run(bytes_in_use)
    if log is not None:
        # BUGFIX: label said "free mem" but BytesInUse reports memory IN USE.
        log.P("  GPU mem in use: {:.1f} Gb".format(val / (1024**3)))
    return val


if __name__ == '__main__':
    # Shared duplex config consumed by the Logger helper.
    cfg = 'config/duplex_config.txt'
    log = Logger(lib_name='MGS', config_file=cfg, TF_KERAS=True)

    # Load two frozen inference graphs: a YOLO detector and a
    # MobileNet-SSD face detector (filenames resolved by the Logger).
    yolo = log.LoadGraphFromModels('01_1712_y_720_1280_c.pb')
    face = log.LoadGraphFromModels('20_190301_mob_ssd_faces.pb')

    # One session per graph; allow_growth avoids reserving all GPU
    # memory up front so both sessions can coexist on one device.
    config_proto = tf.compat.v1.ConfigProto()
    config_proto.gpu_options.allow_growth = True
    yolo_sess = tf.compat.v1.Session(graph=yolo, config=config_proto)
    log.P("Created yolo session")

    config_proto = tf.compat.v1.ConfigProto()
    config_proto.gpu_options.allow_growth = True
    face_sess = tf.compat.v1.Session(graph=face, config=config_proto)
    log.P("Created ssd session")

    # NOTE(review): snippet appears truncated here -- the original file
    # likely fetches more tensors and runs inference below this line.
    tf_face_classes = face_sess.graph.get_tensor_by_name("detection_classes:0")
def main():
    """
    Train the segmentation network end to end.

    Builds device/model/loss/optimizer/scheduler, optionally resumes from a
    checkpoint, then runs the epoch loop with validation, best-model
    checkpointing (by AUC of ROC) and early stopping.
    """
    setpu_seed(2021)
    args = parse_args()
    save_path = os.path.join(args.outf, args.save)

    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.cuda else "cpu")
    cudnn.benchmark = True

    log = Logger(save_path)
    # Mirror stdout into the run's train_log.txt.
    sys.stdout = Print_Logger(os.path.join(save_path, 'train_log.txt'))
    print('The computing device used is: ',
          'GPU' if device.type == 'cuda' else 'CPU')

    # net = models.UNetFamily.U_Net(1,2).to(device)
    net = models.LadderNet(inplanes=1, num_classes=2, layers=3,
                           filters=16).to(device)
    print("Total number of parameters: " + str(count_parameters(net)))

    log.save_graph(
        net,
        torch.randn((1, 1, 64, 64)).to(device).to(
            device=device))  # Save the model structure to the tensorboard file

    # criterion = LossMulti(jaccard_weight=0,class_weights=np.array([0.5,0.5]))
    criterion = CrossEntropyLoss2d()  # Initialize loss function

    # BUGFIX: the optimizer must exist BEFORE the resume branch below.
    # The original called optimizer.load_state_dict() on a not-yet-defined
    # optimizer, raising NameError whenever --pre_trained was set.
    optimizer = optim.Adam(net.parameters(), lr=args.lr)

    # The training speed of this task is fast, so pre training is not recommended
    if args.pre_trained is not None:
        # Load checkpoint and restore model/optimizer/epoch state.
        print('==> Resuming from checkpoint..')
        checkpoint = torch.load(args.outf +
                                '%s/latest_model.pth' % args.pre_trained)
        net.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.start_epoch = checkpoint['epoch'] + 1

    # Cosine annealing over the full run; created after the (optional)
    # optimizer-state restore so it captures the restored hyperparameters.
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                        T_max=args.N_epochs,
                                                        eta_min=0)

    train_loader, val_loader = get_dataloader(args)  # create dataloader

    if args.val_on_test:
        print(
            '\033[0;32m===============Validation on Testset!!!===============\033[0m'
        )
        val_tool = Test(args)

    # Best epoch so far, tracked by AUC of the ROC curve.
    best = {'epoch': 0, 'AUC_roc': 0.5}
    trigger = 0  # Early-stop counter: epochs since the last improvement.
    for epoch in range(args.start_epoch, args.N_epochs + 1):
        print('\nEPOCH: %d/%d --(learn_rate:%.6f) | Time: %s' % \
            (epoch, args.N_epochs,optimizer.state_dict()['param_groups'][0]['lr'], time.asctime()))

        # train stage
        train_log = train(train_loader, net, criterion, optimizer, device)
        # val stage: either the validation split or (optionally) the test set
        if not args.val_on_test:
            val_log = val(val_loader, net, criterion, device)
        else:
            val_tool.inference(net)
            val_log = val_tool.val()

        log.update(epoch, train_log, val_log)  # Add log information
        lr_scheduler.step()

        # Save checkpoint of latest and best model.
        state = {
            'net': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch
        }
        torch.save(state, os.path.join(save_path, 'latest_model.pth'))
        trigger += 1
        if val_log['val_auc_roc'] > best['AUC_roc']:
            print('\033[0;33mSaving best model!\033[0m')
            torch.save(state, os.path.join(save_path, 'best_model.pth'))
            best['epoch'] = epoch
            best['AUC_roc'] = val_log['val_auc_roc']
            trigger = 0  # reset the early-stop counter on improvement
        print('Best performance at Epoch: {} | AUC_roc: {}'.format(
            best['epoch'], best['AUC_roc']))
        # early stopping
        if args.early_stop is not None:
            if trigger >= args.early_stop:
                print("=> early stopping")
                break
        torch.cuda.empty_cache()
Пример #5
0

from libraries.logger import Logger
from duplex.utils import get_tx2_and_config_file
from duplex.duplex_engine import DuplexEngine

import sys

if __name__ == '__main__':

  # Detect whether we run on a Jetson TX2 and pick the matching config file.
  is_tx2, cfg = get_tx2_and_config_file()
  log = Logger(lib_name='ODJD',  config_file=cfg,  TF_KERAS=True)

  # Command-line flags are matched case-insensitively.
  args = [x.upper() for x in sys.argv]
  

  log.P("Args: {}".format(args))
  
  # Feature toggles: each NO* flag disables the corresponding feature.
  _debug = 'DEBUG' in args
  show_debug = 'NOSHOW' not in args
  use_face = 'NOFACE' not in args
  use_emotion = 'NOEMO' not in args
  show_boxes = 'NOBOX' not in args
    
  
  # NOTE(review): this call is truncated in the visible source; additional
  # keyword arguments and the closing parenthesis follow in the original.
  eng = DuplexEngine(log=log, 
                     runs_on_device=is_tx2, 
                     debug=_debug,
                     use_face=use_face,
                     use_emotion=use_emotion,
                     boxes=show_boxes,
Пример #6
0
  log.P("*"*50)
  log.P("LocalConfig v{} received config data:".format(__VER__))
  dct_prev = log.load_data_json(fn_config)
  log.P("  Current: {}".format(dct_prev))
  log.save_data_json(dct_config, fn_config )
  dct_new = log.load_data_json(fn_config)
  log.P("  New:     {}".format(dct_config))
  jresponse = flask.jsonify({
            "RECEIVED_CONFIG_UPDATE": dct_new})    
  jresponse.headers["Access-Control-Allow-Origin"] = "*"
  jresponse.headers["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS, DELETE"
  jresponse.headers["Access-Control-Allow-Headers"] = "Content-Type"
  log.P("*"*50)
  return jresponse
  
  

if __name__ == '__main__':
  # Standalone local config server for OmniDJ.
  config_file = 'config/duplex_config.txt'
  log = Logger(lib_name='ODJcfg', config_file=config_file,  TF_KERAS=False)
  log.P("Starting OmniDJ local config server {}".format(__VER__))
  # NOTE(review): fn_config is defined elsewhere in this module (outside the
  # visible snippet); presumably the path of the persisted config json.
  dct_viewer = log.load_data_json(fn_config)
  # BUGFIX: corrected log-label typo ("Currend" -> "Current").
  log.P("Current config:\n{}".format(dct_viewer))
  
  # Single endpoint: GET/POST/OPTIONS on /config, handled by config_update.
  app = flask.Flask('LocalConfigServer')
  app.add_url_rule(rule='/config', 
                   endpoint="LocalConfig", 
                   view_func=config_update, 
                   methods = ['GET', 'POST','OPTIONS']
                   )  
  app.run(host='127.0.0.1', port=5500)
Пример #7
0
    def __init__(self, exec_path, nodeid, bind_ip, webuiport, comport, masterip=None, masterport=None):
        """
        Initialize node state, the per-platform data directory, the
        inter-process queues and the worker objects (not yet started).

        :param exec_path: base path of the executable/resources.
        :param nodeid: unique name of this node.
        :param bind_ip: address the web servers bind to.
        :param webuiport: control-panel (web UI) port.
        :param comport: port used to talk to other nodes.
        :param masterip: master node IP; None makes this node the master.
        :param masterport: master node communication port.
        """
        self.ismaster = True
        self.exec_path = exec_path
        self.nodeid = nodeid
        self.bindip = bind_ip
        self.webuiport = webuiport
        self.comport = comport
        self.masterip = masterip
        self.masterport = masterport
        self.user_appdata_directory = None
        self.started_at = time.time()
        self.last_tick_at = self.started_at
        self.uptime = 0
        self.public_ip = "Not yet known"

        # Pick the conventional per-user application-data location per OS.
        if platform.system() == "Darwin":
            self.user_appdata_directory = os.path.expanduser("~/Library/ServerSync/%s" % (self.nodeid))
        elif platform.system() == "Linux":
            self.user_appdata_directory = os.path.expanduser("~/.config/ServerSync/%s" % (self.nodeid)) # inspired by Google Chrome location
        elif platform.system() == "Windows":
            self.user_appdata_directory = os.path.expanduser("~\\AppData\\Local\\ServerSync\\%s" % (self.nodeid))
        else:
            # BUGFIX: `raise("...")` raises a str, which is a TypeError in
            # Python 3 -- raise a real exception instead.
            raise RuntimeError("Platform not supported")

        if masterip is None:
            self.print("Setting up node %s as master\n" % (nodeid))
        else:
            self.ismaster = False
            self.print("Setting up node %s.\n" % (nodeid))
            self.print("  will ask peer list to node at %s:%s\n" % (masterip, masterport))

        self.print("Control panel is on port %s\n" % (webuiport))
        self.print("Listen for other nodes on port %s\n" % (comport))

        # create a directory for the application if it does not already exist
        if not os.path.exists(self.user_appdata_directory):
            os.makedirs(self.user_appdata_directory)

        self.updater = Updater()

        # One in/out queue pair per worker; the console only produces output.
        #m = multiprocessing.Manager()
        self.q_peerstore_in = multiprocessing.Queue() #m.Queue() #for networked multiprocessing
        self.q_peerstore_out = multiprocessing.Queue()
        self.q_comwebserver_in = multiprocessing.Queue() #m.Queue() #for networked multiprocessing
        self.q_comwebserver_out = multiprocessing.Queue()
        self.q_client_in = multiprocessing.Queue()
        self.q_client_out = multiprocessing.Queue()
        self.q_uiwebserver_in = multiprocessing.Queue()
        self.q_uiwebserver_out = multiprocessing.Queue()
        self.q_logger_in = multiprocessing.Queue()
        self.q_logger_out = multiprocessing.Queue()
        self.q_console_out = multiprocessing.Queue()
        self.peerstore = PeerStore(THREAD_SLEEP_TIME, self.q_peerstore_in, self.q_peerstore_out, "PeerStore", self.user_appdata_directory, self.comport, self.nodeid, self.masterip, self.masterport)
        self.uiwebserver = UIWebServer(THREAD_SLEEP_TIME, self.exec_path, self.bindip, self.q_uiwebserver_in, self.q_uiwebserver_out, self.webuiport, self.nodeid, "UIWebServer")
        self.logger = Logger(THREAD_SLEEP_TIME, self.q_logger_in, self.q_logger_out, self.nodeid, "Logger")
        self.comwebserver = ComWebServer(THREAD_SLEEP_TIME, self.exec_path, self.bindip, self.q_comwebserver_in, self.q_comwebserver_out, self.comport, self.nodeid, "ComWebServer")
        self.client = Client(THREAD_SLEEP_TIME, self.exec_path, self.bindip, self.q_client_in, self.q_client_out, self.nodeid, "Client")
        self.console = Console(self.q_console_out, sys.stdin, sys.stdout, self.nodeid, "Console")

        # Main-loop state: set by start() and by dispatch_message().
        self.running = False

        self.should_restart = False
Пример #8
0
class ServerSync(object):
    """
    Top-level supervisor for one sync node.

    Creates the worker processes (ComWebServer, UIWebServer, Client, Logger,
    PeerStore) and the Console thread, connects them through multiprocessing
    queues, and runs the main loop that dispatches messages between them
    until a halt/stop or restart command arrives.
    """

    def __init__(self, exec_path, nodeid, bind_ip, webuiport, comport, masterip=None, masterport=None):
        """
        Initialize node state, the per-platform data directory, the
        inter-process queues and the worker objects (not yet started).

        :param exec_path: base path of the executable/resources.
        :param nodeid: unique name of this node.
        :param bind_ip: address the web servers bind to.
        :param webuiport: control-panel (web UI) port.
        :param comport: port used to talk to other nodes.
        :param masterip: master node IP; None makes this node the master.
        :param masterport: master node communication port.
        """
        self.ismaster = True
        self.exec_path = exec_path
        self.nodeid = nodeid
        self.bindip = bind_ip
        self.webuiport = webuiport
        self.comport = comport
        self.masterip = masterip
        self.masterport = masterport
        self.user_appdata_directory = None
        self.started_at = time.time()
        self.last_tick_at = self.started_at
        self.uptime = 0
        self.public_ip = "Not yet known"

        # Pick the conventional per-user application-data location per OS.
        if platform.system() == "Darwin":
            self.user_appdata_directory = os.path.expanduser("~/Library/ServerSync/%s" % (self.nodeid))
        elif platform.system() == "Linux":
            self.user_appdata_directory = os.path.expanduser("~/.config/ServerSync/%s" % (self.nodeid)) # inspired by Google Chrome location
        elif platform.system() == "Windows":
            self.user_appdata_directory = os.path.expanduser("~\\AppData\\Local\\ServerSync\\%s" % (self.nodeid))
        else:
            # BUGFIX: `raise("...")` raises a str, which is a TypeError in
            # Python 3 -- raise a real exception instead.
            raise RuntimeError("Platform not supported")

        if masterip is None:
            self.print("Setting up node %s as master\n" % (nodeid))
        else:
            self.ismaster = False
            self.print("Setting up node %s.\n" % (nodeid))
            self.print("  will ask peer list to node at %s:%s\n" % (masterip, masterport))

        self.print("Control panel is on port %s\n" % (webuiport))
        self.print("Listen for other nodes on port %s\n" % (comport))

        # create a directory for the application if it does not already exist
        if not os.path.exists(self.user_appdata_directory):
            os.makedirs(self.user_appdata_directory)

        self.updater = Updater()

        # One in/out queue pair per worker; the console only produces output.
        #m = multiprocessing.Manager()
        self.q_peerstore_in = multiprocessing.Queue() #m.Queue() #for networked multiprocessing
        self.q_peerstore_out = multiprocessing.Queue()
        self.q_comwebserver_in = multiprocessing.Queue() #m.Queue() #for networked multiprocessing
        self.q_comwebserver_out = multiprocessing.Queue()
        self.q_client_in = multiprocessing.Queue()
        self.q_client_out = multiprocessing.Queue()
        self.q_uiwebserver_in = multiprocessing.Queue()
        self.q_uiwebserver_out = multiprocessing.Queue()
        self.q_logger_in = multiprocessing.Queue()
        self.q_logger_out = multiprocessing.Queue()
        self.q_console_out = multiprocessing.Queue()
        self.peerstore = PeerStore(THREAD_SLEEP_TIME, self.q_peerstore_in, self.q_peerstore_out, "PeerStore", self.user_appdata_directory, self.comport, self.nodeid, self.masterip, self.masterport)
        self.uiwebserver = UIWebServer(THREAD_SLEEP_TIME, self.exec_path, self.bindip, self.q_uiwebserver_in, self.q_uiwebserver_out, self.webuiport, self.nodeid, "UIWebServer")
        self.logger = Logger(THREAD_SLEEP_TIME, self.q_logger_in, self.q_logger_out, self.nodeid, "Logger")
        self.comwebserver = ComWebServer(THREAD_SLEEP_TIME, self.exec_path, self.bindip, self.q_comwebserver_in, self.q_comwebserver_out, self.comport, self.nodeid, "ComWebServer")
        self.client = Client(THREAD_SLEEP_TIME, self.exec_path, self.bindip, self.q_client_in, self.q_client_out, self.nodeid, "Client")
        self.console = Console(self.q_console_out, sys.stdin, sys.stdout, self.nodeid, "Console")

        # Main-loop state: set by start() and by dispatch_message().
        self.running = False

        self.should_restart = False

    def start(self):
        """
        Start every worker and run the main dispatch loop until halted.

        :return: True when the caller should fully restart the node,
                 False for a plain shutdown.
        """
        # we will fork the subprocesses now so we disable the signal handling for them
        # we will enable it later
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # start processes
        self.comwebserver.start()
        self.uiwebserver.start()
        self.client.start()
        self.logger.start()
        self.peerstore.start()

        # catch CTRL+C (only in this supervisor process)
        signal.signal(signal.SIGINT, self.signal_handler)

        # start threads
        self.console.start()

        self.running = True

        self.q_logger_in.put(Message("System", "Logger", "register", ["System"]))

        while self.running:
            time.sleep(MAIN_SLEEP_TIME)

            self.process_queues()

            # each second we send a tick to some of the processes telling them for how long we have been running
            if int(time.time() - self.last_tick_at) >= 1:
                self.last_tick_at = time.time()
                self.uptime = int(time.time() - self.started_at)
                self.q_peerstore_in.put(Message(self.nodeid, "PeerStore", "tick", [self.uptime]))
                self.q_comwebserver_in.put(Message(self.nodeid, "ComWebServer", "tick", [self.uptime]))
                self.q_uiwebserver_in.put(Message(self.nodeid, "UIWebServer", "tick", [self.uptime]))
                self.q_logger_in.put(Message(self.nodeid, "Logger", "tick", [self.uptime]))

            # We send poison pill to threads only if they are not configured to loop based on a sleep time (blocking queues)
            # BUGFIX: `is 0` compares identity, which is unreliable for ints
            # (and a SyntaxWarning on modern Python); compare by value.
            if THREAD_SLEEP_TIME == 0:
                self.poison_pill_queues()

        self.print("Shutdown client and server processes...\n")

        # ask the client and server process to shut down, except if we received a kill signal
        # (message typo fixed: "gracefull" -> "graceful")
        self.print("Wait for graceful stop\n")
        self.q_client_in.put(Message(self.nodeid, "Client", "kill", []))
        self.q_comwebserver_in.put(Message(self.nodeid, "ComWebServer", "kill", []))
        self.q_uiwebserver_in.put(Message(self.nodeid, "UIWebServer", "kill", []))
        self.q_logger_in.put(Message(self.nodeid, "Logger", "kill", []))
        self.q_peerstore_in.put(Message(self.nodeid, "PeerStore", "kill", []))

        time.sleep(1)

        # read what remains in the queues
        self.process_queues()

        # Wait for the workers to finish
        self.comwebserver.join()
        self.print("ComWebServer joined\n")
        self.uiwebserver.join()
        self.print("UIWebServer joined\n")
        self.logger.join()
        self.print("Logger joined\n")
        self.client.join()
        self.print("Client joined\n")
        self.peerstore.join()
        self.print("PeerStore joined\n")
        self.console.cancel()
        self.print("Console joined\n")

        self.print("Stopped\n")

        return self.should_restart

    def poison_pill_queues(self):
        """Wake up workers blocked on their input queues with a no-op pill."""
        self.q_comwebserver_in.put(Message(self.nodeid, "ComWebServer", "pill", []))
        self.q_uiwebserver_in.put(Message(self.nodeid, "UIWebServer", "pill", []))
        self.q_logger_in.put(Message(self.nodeid, "Logger", "pill", []))
        self.q_peerstore_in.put(Message(self.nodeid, "PeerStore", "pill", []))

    def process_queues(self):
        """
        Drain every worker output queue, dispatching each message, and return
        once all queues have reported empty during the same sweep.
        """
        q_comwebserver_out_empty = False
        q_client_out_empty = False
        q_uiwebserver_out_empty = False
        q_logger_out_empty = False
        q_peerstore_out_empty = False
        q_console_out_empty = False

        while (not q_comwebserver_out_empty) or (not q_client_out_empty) or (not q_uiwebserver_out_empty) or (not q_logger_out_empty) or (not q_peerstore_out_empty) or (not q_console_out_empty):
            # Non-blocking get() raises queue.Empty when nothing is pending;
            # the broad except treats any failure as "queue drained".
            try:
                m = self.q_console_out.get(False)
                self.dispatch_message(m)
            except Exception:
                q_console_out_empty = True

            try:
                m = self.q_uiwebserver_out.get(False)
                self.dispatch_message(m)
            except Exception:
                q_uiwebserver_out_empty = True

            try:
                m = self.q_logger_out.get(False)
                self.dispatch_message(m)
            except Exception:
                q_logger_out_empty = True

            try:
                m = self.q_comwebserver_out.get(False)
                self.dispatch_message(m)
            except Exception:
                q_comwebserver_out_empty = True

            try:
                m = self.q_peerstore_out.get(False)
                self.dispatch_message(m)
            except Exception:
                q_peerstore_out_empty = True

            try:
                m = self.q_client_out.get(False)
                self.dispatch_message(m)
            except Exception:
                q_client_out_empty = True

    def dispatch_message(self, message):
        """
        Route a message to its destination worker queue.

        Messages addressed to "System" are handled inline (halt/restart,
        uptime, version queries, node info, prints); everything else is
        forwarded to the matching worker's input queue.
        """
        try:
            #
            # intercept messages like exit/quit/restart
            #
            if message.destination == "System":
                if message.command == "halt" or message.command == "stop":
                    self.running = False
                elif message.command == "restart":
                    self.running = False
                    self.should_restart = True
                elif message.command == "PublicIpDiscovered":
                    self.public_ip = message.arguments[0]
                elif message.command == "GetVersion":
                    self.print("\n\nreceived Get Version FROM %s\n\n" % (message.sender_id))
                    m = Message("System", message.sender_id, "GetVersion_ret",
                                [{
                                    "current_version": self.updater.current_version,
                                    "new_version": self.updater.new_version_available
                                }], communication_id=message.communication_id)
                    self.q_uiwebserver_in.put(m)
                elif message.command == "increment":
                    message.destination = "UIWebServer" # useless step ? the message will not be dispatched anymore, no need to change its dest
                    self.q_uiwebserver_in.put(message)
                elif message.command == "uptime":
                    m = Message("System", "Logger", "print", ["Running since %s (hh:mm:ss)\n" % (time.strftime('%H:%M:%S', time.gmtime(self.uptime)))])
                    self.q_logger_in.put(m)
                elif message.command == "print":
                    self.print("%s %s %s\n"%(message.sender_id, message.command, message.arguments))
                elif message.command == "exception":
                    self.print("%s Catched exception: %s \n"%(message.sender_id, [message.arguments[0]]))
                elif message.command == "NodeInfo":
                    m = Message("System", message.sender_id, "NodeInfo_ret",
                                [{"ismaster": self.ismaster, "nodeid": self.nodeid,
                                 "webuiport": self.webuiport, "comport": self.comport,
                                 "masterip": self.masterip, "masterport": self.masterport,
                                 "user_appdata_directory": self.user_appdata_directory,
                                 "started_at": self.started_at, "uptime": self.uptime,
                                 "public_ip": self.public_ip,
                                  "version": self.updater.current_version
                                  }], communication_id=message.communication_id)
                    self.q_uiwebserver_in.put(m)

            #
            # we also intercept print commands
            #
            elif message.destination == "Logger":
                if self.running:
                    self.q_logger_in.put(message)
                else:
                    # if no logger running we fallback to the system print
                    self.print("%s %s %s\n"%(message.sender_id, message.command, message.arguments))

            elif message.destination == "PeerStore":
                self.q_peerstore_in.put(message)

            elif message.destination == "UIWebServer":
                self.q_uiwebserver_in.put(message)

            elif message.destination == "ComWebServer":
                self.q_comwebserver_in.put(message)

            elif message.destination == "Client":
                self.q_client_in.put(message)


            #
            # dispatch other messages
            #
            else:
                self.print("Message dispatcher received a msg for the unknown destination '%s'\n" % (message.destination))

        except Exception as e:
            # Dispatching must never kill the supervisor loop; log and go on.
            self.print("Exception while dispatching message '%s'\n" % ([e]))

    def print(self, message):
        """Write a UTC-timestamped message directly to stdout and flush."""
        sys.stdout.write("%s => %s" % (datetime.datetime.utcnow().strftime("%Y %b %d %H:%M:%S %Z "), message))
        sys.stdout.flush()

    def signal_handler(self, sig, frame):
        """On CTRL+C, tell the operator to use console commands instead."""
        if sig == signal.SIGINT:
            self.print("Please type 'halt' or 'stop' to close the program or 'restart' to full restart\n")
Пример #9
0
"""
import tensorflow as tf
from libraries.logger import Logger


def combine_graphs(graph1, graph2, name1='g1', name2='g2'):
    """
    Merge two TensorFlow graphs into one new graph.

    Each input graph is imported under its own name scope (name1 / name2)
    so node names from the two graphs cannot collide.

    :param graph1: first tf.Graph to import.
    :param graph2: second tf.Graph to import.
    :param name1: name scope for graph1's nodes.
    :param name2: name scope for graph2's nodes.
    :return: the combined tf.Graph.
    """
    merged = tf.Graph()
    with merged.as_default():
        for source_graph, scope in ((graph1, name1), (graph2, name2)):
            tf.import_graph_def(graph_def=source_graph.as_graph_def(), name=scope)
    return merged


if __name__ == '__main__':
    log = Logger(lib_name='GC', config_file='config/duplex_config.txt')
    g1 = log.LoadGraphFromModels('01_1712_y_720_1280_c.pb')
    n1 = 'yolo'
    g2 = log.LoadGraphFromModels('20_190301_mob_ssd_faces.pb')
    n2 = 'face'
    gc = combine_graphs(g1, g2, name1=n1, name2=n2)

    config_proto = tf.compat.v1.ConfigProto()
    config_proto.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(graph=gc, config=config_proto)

    earning_phase = sess.graph.get_tensor_by_name(n1 +
                                                  "/keras_learning_phase:0")
    tf_classes = sess.graph.get_tensor_by_name(n1 + "/YOLO_OUTPUT_CLASSES" +
                                               ":0")
    tf_scores = sess.graph.get_tensor_by_name(n1 + "/YOLO_OUTPUT_SCORES" +