Example no. 1
def _main():
    last = ''
    file_flag = False
    while True:
        file_flag = False
        raw_data = input()

        if raw_data == 'restart':
            msw_queue.put(0)
            break
        if raw_data == 'exit':
            msw_queue.put(1)
            break
        if raw_data == 'update':
            raw_data = 'update:compress;update_to:*'
        if raw_data == '1':
            raw_data = 'ffmpeg:autostart'
        if raw_data == '2':
            raw_data = 'ffmpeg:enable;to:*,server:miku'
        if raw_data == 'r':
            raw_data = last

        last = raw_data

        if raw_data[:6] == '(file)':  # like "(file)log: filename.exe"
            raw_data = raw_data[6:]
            file_flag = True

        first_index, last_index = find_index(raw_data)
        app = raw_data[:first_index]
        body = raw_data[last_index:]

        ihead = {}
        if ';' in body and ':' in body:
            ihead_index = body.index(';')
            ihead_str = body[ihead_index + 1:]
            body = body[:ihead_index]

            ihead_list = ihead_str.split(',')
            for key_value in ihead_list:
                key, value = key_value.split(':')
                ihead[key] = value

        app = app.replace(' ', '')
        dp = Datapack(head={'from': __name__})

        dp.head.update(ihead)

        dp.app = app

        if file_flag:
            dp.method = 'file'
            dp.body = b''
            dp.head['filename'] = body

        else:
            dp.body = body.encode()

        send_queue.put(dp)
        print('Command has been sent', dp)
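For reference, the command grammar parsed above is app:body;key:value,key:value. find_index is not shown in these excerpts; the sketch below assumes it merely brackets the first ':' (a hypothetical stand-in, not the original helper):

def find_index(raw):
    # hypothetical stand-in: bracket the first ':' separator
    i = raw.index(':')
    return i, i + 1

def parse_command(raw):
    first_index, last_index = find_index(raw)
    app, body = raw[:first_index], raw[last_index:]
    ihead = {}
    if ';' in body and ':' in body:
        body, _, ihead_str = body.partition(';')
        ihead = dict(kv.split(':') for kv in ihead_str.split(','))
    return app.replace(' ', ''), body, ihead

print(parse_command('ffmpeg:enable;to:*,server:miku'))
# -> ('ffmpeg', 'enable', {'to': '*', 'server': 'miku'})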
Example no. 2
    def __init__(self):
        if ONLYPROXY and not MYPROXY:
            print('Config failed: ONLYPROXY is true but MYPROXY is not set')
            return
        self.send_queue = queue.Queue()
        self.id_dict = {}
        self.lock = threading.Lock()
        self.all_connection_list = []
        self.wheel_queue = queue.Queue()

        self.netlist = [] # stores negative connections
        self.netlist_pass = []
        self.conflist = [] # stores connections from the config file
        self.conflist_pass = []
        self.mhtlist = [] # stores connections exchanged via mht
        self.mhtlist_pass = []
        self.proxydict = {}

        self.alllist = [self.netlist, self.netlist_pass, self.conflist, self.conflist_pass,
                        self.mhtlist, self.mhtlist_pass]

        self.start_wheel_thread = threading.Thread(target=self.start_wheel, args=(), daemon=True)
        self.start_wheel_thread.start()

        self.start_accpet_connection_thread = threading.Thread(target=self.start_accpet_connection, args=(), daemon=True)
        self.start_accpet_connection_thread.start()

        self.start_sending_dp_thread = threading.Thread(target=self.start_sending_dp, args=(), daemon=True)
        self.start_sending_dp_thread.start()

        self.start_positive_connecting_thread = threading.Thread(target=self.start_positive_connecting, args=(), daemon=True)
        self.start_positive_connecting_thread.start()

        self.start_mht_thread = threading.Thread(target=self.start_mht, args=(), daemon=True)
        self.start_mht_thread.start()
Example no. 3
    def send_to_id(self, to, dp): # send to one id, handling proxy routing along the way

        connections = self.id_dict.get(to)
        if not connections:
            if to == ID:
                print('To id %s is yourself!' % to, dp) # maybe proxy to yourself
                return
            if to in self.proxydict: # warning: tricky proxy-routing code
                if not ID == self.proxydict[to]: # check whether proxy is yourself
                    if dp.head.get('to'):
                        dp.head['to'] = self.proxydict[to] + '&' + to + '&' + dp.head['to']
                    else:
                        dp.head['to'] = self.proxydict[to] + '&' + to
                else:
                    if dp.head.get('to'):
                        dp.head['to'] = to + '&' + dp.head['to'] 
                    else:
                        dp.head['to'] = to
                self.wheel_queue.put(dp)
                return

            print('To id %s has no connection now (retry %d)...' % (to, dp.failed_times), dp)
            if dp.head.get('to'):
                dp.head['to'] = to + '&' + dp.head['to'] 
            else:
                dp.head['to'] = to
            self.wheel_queue.put(dp)
            return
        
        connection = connections[0]
        connection.sendall(dp)
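The 'to' header built above is an '&'-separated hop list: senders prepend hops and the dispatcher in start_sending_dp pops the first one off. A standalone sketch of that convention (push_hop and pop_hop are illustrative names):

def push_hop(head, hop):
    # prepend a hop, matching the to + '&' + dp.head['to'] pattern above
    head['to'] = hop + '&' + head['to'] if head.get('to') else hop

def pop_hop(head):
    # consume the first hop, matching the split in start_sending_dp
    first, _, rest = head.get('to', '').partition('&')
    head['to'] = rest
    return first

head = {}
push_hop(head, 'target')   # final destination
push_hop(head, 'proxy01')  # route through the proxy first
assert pop_hop(head) == 'proxy01' and head['to'] == 'target'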
Example no. 4
    def start_wheel(self):
        while True:
            dp = self.wheel_queue.get()
            dp.failed_times += 1
            if dp.failed_times > 39:
                print('Datapack abandoned', dp)
                continue
            time.sleep(RETRYSLEEP)
            receive_queue.put(dp)
Example no. 5
    def getlist(self, conntype):
        if conntype == 'net':
            return self.netlist, self.netlist_pass
        elif conntype == 'conf':
            return self.conflist, self.conflist_pass
        elif conntype == 'mht':
            return self.mhtlist, self.mhtlist_pass
        else:
            print('Could not find conntype %s' % conntype)
            return None, None
Example no. 6
    def check_id(self):
        '''
        check id package must like
        -------------------------------
        post handshake msw/0.1
        id: [yourID]
        listen_port: [3900]
        length: 0
        
        -------------------------------
        error code list:
        1: no "id" in head
        2: receiving data failed
        3: app name is not handshake
        4: id is yourself
        5: only-proxy mode is on and the peer is not your proxy
        6: the peer only accepts its proxy, and that is not you
        '''
        data = None
        if self.positive:
            self.send_id()
        try:
            data = self.conn.recv(BUFFSIZE)
        except ConnectionResetError:
            print('One connection failed before ID check')

        if not data:
            return 2, ''

        self.buff += data
        dp = Datapack()
        dp.encode_data = self.buff # may need copy.copy(self.buff) here
        self.buff = dp.decode(only_head=True)
        if not dp.head.get('id'):
            return 1, dp.head.get('flag')

        if not dp.app == 'handshake':
            return 3, dp.head.get('flag')

        self.id = dp.head['id']
        self.listen_port = int(dp.head.get('listen_port'))

        if self.id == ID:
            #print('you connect to your self')
            return 4, dp.head.get('flag')

        if ONLYPROXY and not self.id == MYPROXY: # refuse non-proxy connections
            return 5, dp.head.get('flag')

        if dp.head.get('onlyuseproxy'):
            if not dp.head['onlyuseproxy'] == ID:
                return 6, dp.head.get('flag')

        if not self.positive:
            self.send_id()

        return 0, dp.head.get('flag')
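The docstring above pins down the wire format: a request line '<method> <app> msw/0.1', 'key: value' headers including length, a blank line, then exactly length body bytes. A minimal self-contained sketch of that framing (encode_frame and decode_head are illustrative names, not the actual Datapack API):

def encode_frame(method, app, head, body=b''):
    lines = ['%s %s msw/0.1' % (method, app)]
    lines += ['%s: %s' % (k, v) for k, v in head.items()]
    lines += ['length: %d' % len(body), '', '']
    return '\n'.join(lines).encode() + body

def decode_head(buff):
    head_raw, _, rest = buff.partition(b'\n\n')
    lines = head_raw.decode().split('\n')
    method, app, _ = lines[0].split(' ')
    head = dict(line.split(': ', 1) for line in lines[1:])
    return method, app, head, rest  # rest is (a prefix of) the body

frame = encode_frame('post', 'handshake', {'id': 'miku', 'listen_port': 3900})
print(decode_head(frame))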
Example no. 7
    def process_command(self, dp):
        if dp.body == b'status':
            result = ''
            result += 'Online %s' % str(self.id_dict) + '\n'
            result += 'proxydict %s' % str(self.proxydict) + '\n'
            result += 'conflist %s' % str(self.conflist) + '\n'
            result += 'conflist_pass %s' % str(self.conflist_pass) + '\n'
            result += 'netlist %s' % str(self.netlist) + '\n'
            result += 'netlist_pass %s' % str(self.netlist_pass) + '\n'
            result += 'mhtlist %s' % str(self.mhtlist) + '\n'
            result += 'mhtlist_pass %s' % str(self.mhtlist_pass)
            
            ndp = dp.reply()
            ndp.body = result.encode()
            send_queue.put(ndp)

        elif dp.body == b'mht' and dp.method == 'get':
            ndp = dp.reply()

            data_dict = {}
            connection_list = []
            with self.lock:
                for id in self.id_dict:
                    connections = self.id_dict[id]
                    for connection in connections:
                        ip, port = connection.conn.getpeername()
                        port = int(connection.listen_port)  # use the advertised listen port, not the ephemeral peer port
                        connection_list.append((ip, port))
                for addr in self.conflist:
                    if not addr in connection_list:
                        connection_list.append(addr)
                for addr in self.conflist_pass:
                    if not addr in connection_list:
                        connection_list.append(addr)
            data_dict['mht'] = connection_list
            data_dict['proxy'] = self.proxydict

            ndp.body = json.dumps(data_dict).encode()

            send_queue.put(ndp)

        elif dp.method == 'reply':
            mhtstr = dp.body.decode()
            data_dict = json.loads(mhtstr)
            mhtlist = data_dict['mht']
            with self.lock:
                for addr in mhtlist:
                    addr = (addr[0], addr[1])
                    if not self.check_in_list(addr):
                        self.mhtlist.append(addr)

                self.proxydict.update(data_dict['proxy'])

        else:
            print('Received unknown command', dp)
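For illustration, the body exchanged by the 'mht' get/reply pair above is JSON of the following shape (the values here are made up): 'mht' lists (ip, listen_port) pairs and 'proxy' maps an id to the id of its proxy. JSON serializes tuples as lists, which is why the receiving branch re-tuples each addr:

import json

data_dict = {'mht': [('192.0.2.1', 3900), ('192.0.2.2', 3901)],
             'proxy': {'miku': 'relay01'}}
wire = json.dumps(data_dict).encode()
decoded = json.loads(wire)
assert decoded['mht'][0] == ['192.0.2.1', 3900]  # the tuple came back as a list
addr = (decoded['mht'][0][0], decoded['mht'][0][1])  # re-tuple, as the code above does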
Example no. 8
def main():
    while True:
        dp = receive_queue.get()

        if dp.method == 'file':
            word = dp.head.get('filename')
        else:
            word = dp.body.decode()

        print('Writing log: %s' % word, dp)
        with open('logger.log', 'a') as f:
            from_app_name = dp.head.get('from', 'Unknown')
            f.write(from_app_name + ': ' + word + '\n')  # write word, not dp.body: for file datapacks the body is empty
Example no. 9
    def set_connection(self, connection):
        id = connection.id
        with self.lock:
            if not self.id_dict.get(id):
                self.id_dict[id] = []
            self.id_dict[id].append(connection)
            self.all_connection_list.append(connection)

            xxxlist, xxxlist_pass = self.getlist(connection.conntype)
            addr = (connection.addr[0], connection.listen_port)
            if addr in xxxlist:
                xxxlist.remove(addr)
            if not addr in xxxlist_pass:
                xxxlist_pass.append(addr)

            print('<%s> %s connected' % (connection.flag, id))
Example no. 10
    def del_connection(self, connection):
        id = connection.id
        with self.lock:
            self.id_dict[id].remove(connection)
            self.all_connection_list.remove(connection)
            if id in self.id_dict and not self.id_dict[id]: # delete the now-empty entry
                del self.id_dict[id]

            if connection.listen_port: # avoid "None" addr port
                xxxlist, xxxlist_pass = self.getlist(connection.conntype)
                addr = (connection.addr[0], connection.listen_port)
                if not addr in xxxlist:
                    xxxlist.append(addr)
                if addr in xxxlist_pass:
                    xxxlist_pass.remove(addr)

            print('<%s> %s disconnected' % (connection.flag, id))
Example no. 11
    def start_accpet_connection(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        listen_ip = jsondata.try_to_read_jsondata('listen_ip', '127.0.0.1')
        listen_port = jsondata.try_to_read_jsondata('listen_port', 3900)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((listen_ip, listen_port))

        listen_num = jsondata.try_to_read_jsondata('listen_num', 39)
        s.listen(listen_num)

        print('Successfully listening at %s:%s, max connections: %s' % (listen_ip, listen_port, listen_num))

        while True:
            conn, addr = s.accept()
            connection = Connection(conn, addr, self)
            connection.i_did_something()
Example no. 12
    def read_addrlist(self):
        if not os.path.exists('addrlist.txt'):
            print('addrlist.txt does not exist; create one based on addrlist_sample.txt')
        else:
            with open('addrlist.txt', 'r') as f:
                raw_data = f.read()
            raw_data = raw_data.replace('\r', '')
            lines = raw_data.split('\n')
            while '' in lines:
                lines.remove('')
            for line in lines:
                ip, port = line.split(':')
                ip = socket.gethostbyname(ip)
                port = int(port)

                self.conflist.append((ip, port))


        if jsondata.try_to_read_jsondata('proxy', False):
            self.proxydict[ID] = jsondata.raw_jsondata['proxy']
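The parser above implies a simple format for addrlist.txt: one host:port pair per line, hostnames resolved with gethostbyname at load time. An illustrative file (addresses are made up):

example.com:3900
192.0.2.7:3901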
Example no. 13
    def send_func(self):
        while True:
            dp = self.padding_queue.get()
            dp.encode()
            self.conn.sendall(dp.encode_data)
            if dp.method == 'file':
                with open(dp.head['filename'], 'rb') as f:
                    for data in f:
                        try:
                            self.conn.sendall(data)
                        except Exception as e:
                            print('Failed to send file %s %s: %s' % (dp.head['filename'], type(e), str(e)), dp)
                            if dp.head.get('to'):
                                dp.head['to'] = self.id + '&' + dp.head['to']
                            else:
                                dp.head['to'] = self.id
                            self.netowrk_controller.wheel_queue.put(dp)
                            break
                if dp.delete:
                    os.remove(dp.head['filename'])
                print('Send file %s to %s finished' % (dp.head['filename'], self.id), dp)
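One quirk worth noting: iterating a binary file with for data in f yields b'\n'-delimited chunks of arbitrary size, which works here but is unusual for binary payloads. A fixed-size chunk reader is the more common pattern (a sketch, not the original code):

def read_chunks(path, size=64 * 1024):
    # yield fixed-size binary chunks instead of b'\n'-delimited lines
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(size), b''):
            yield chunk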
Example no. 14
def main(model_path, backbone, scale, path, save_path, gpu_id):
    device = torch.device("cuda:" + str(gpu_id))
    logger = setup_logger(os.path.join(config.output_dir, 'test_log'))
    logger.info(config.print())
    if os.path.exists(save_path):
        shutil.rmtree(save_path, ignore_errors=True)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    save_img_folder = os.path.join(save_path, 'img')
    if not os.path.exists(save_img_folder):
        os.makedirs(save_img_folder)
    save_txt_folder = os.path.join(save_path, 'result')
    if not os.path.exists(save_txt_folder):
        os.makedirs(save_txt_folder)
    img_paths = [os.path.join(path, x) for x in os.listdir(path)]
    net = PSENet(backbone=backbone,
                 pretrained=config.pretrained,
                 result_num=config.n)
    model = Pytorch_model(model_path, net=net, scale=scale, gpu_id=gpu_id)
    num_gpus = torch.cuda.device_count()
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.to(device)
    recall, precision, f1 = merge_eval(model=model,
                                       save_path=os.path.join(
                                           config.output_dir, 'output'),
                                       test_path=config.testroot,
                                       device=device,
                                       base_path=config.base_path,
                                       use_sub=config.use_sub)
    logger.info('test: recall: {:.6f}, precision: {:.6f}, f1: {:.6f}'.format(
        recall, precision, f1))

    # total_frame = 0.0
    # total_time = 0.0
    # for img_path in tqdm(img_paths):
    #     img_name = os.path.basename(img_path).split('.')[0]
    #     save_name = os.path.join(save_txt_folder, 'res_' + img_name + '.txt')
    #     _, boxes_list, t = model.predict(img_path)
    #     total_frame += 1
    #     total_time += t
    #     # img = draw_bbox(img_path, boxes_list, color=(0, 0, 255))
    #     # cv2.imwrite(os.path.join(save_img_folder, '{}.jpg'.format(img_name)), img)
    #     np.savetxt(save_name, boxes_list.reshape(-1, 8), delimiter=',', fmt='%d')
    # print('fps:{}'.format(total_frame / total_time))
    return save_txt_folder
Example no. 15
    def __init__(self):

        help(config._config)

        arguments = self.parse_arguments()

        if arguments.print_config:
            print(config.print(config.default_config()))
            return

        if not arguments.config_file:
            print("Must specify configuration file via -c. If no configuration"
                  + " file exists, you can generate a blank one with the -p"
                  + " flag")
            return

        try:
            self.config = config.parse(arguments.config_file)
        except config.BadConfiguration:
            print("Your configuration file is invalid. To generate a new,"
                  + " blank configuration, use the -p flag.")
Example no. 16
    def start_sending_dp(self):
        while True:
            dp = receive_queue.get()

            if dp.app == 'net' and not dp.head.get('to'):
                self.process_command(dp)
                continue
            
            if not dp.head.get('to'):
                print('Received a datapack with no "to" header')
                print(str(dp.head))
                continue

            to_str = dp.head['to']
            to_list = to_str.split('&')
            to = to_list.pop(0)
            to_str = '&'.join(to_list)
            dp.head['to'] = to_str
            
            if to == '*':
                with self.lock:
                    for id in self.id_dict:
                        connection = self.id_dict[id][0]
                        connection.sendall(dp)
            elif not to:
                print('Empty hop in "to" header', dp)

            elif ONLYPROXY and not to == MYPROXY:
                if dp.head['to']:
                    dp.head['to'] = to + '&' + dp.head['to']  # restore the popped hop ('&'-separated) before rerouting via the proxy
                else:
                    dp.head['to'] = to
                self.send_to_id(MYPROXY, dp)

            else:
                self.send_to_id(to, dp)
Example no. 17
    def receive(self):
        still_need = 0

        while True:
            try:
                data = self.conn.recv(BUFFSIZE)
            except ConnectionResetError:
                break
            except Exception as e:
                print('Connection recv error %s: %s' % (type(e), str(e)))
                break
            if not data:
                break
            self.buff += data
            
            if not still_need:
                dp = Datapack()
                dp.encode_data = self.buff
                try:
                    self.buff = dp.decode(only_head=True)
                    
                    if dp.method == 'file':
                        create_floder(dp.head['filename'])
                        create_floder('tmp/' + dp.head['filename'])
                        self.f = open('tmp/' + dp.head['filename'], 'ab')
                    if dp.method == 'file' and os.path.exists(dp.head['filename']):
                        os.remove(dp.head['filename'])
                        
                except Exception as e:
                    print('Decode head failed %s: %s' % (type(e), str(e)))
                    print(self.buff)
                    break

                length = int(dp.head.get('length'))
                still_need = length
            
            if still_need > len(self.buff):
                # writing tmp data
                if dp.method == 'file':
                    still_need -= self.f.write(self.buff)
                else:
                    dp.body += self.buff
                    still_need -= len(self.buff)
            self.buff = b'' # clear buff: everything in it has been consumed

            else: # body fully received
                if dp.method == 'file':
                    self.f.write(self.buff[:still_need])
                    self.f.close()
                    self.f = None
                else:
                    dp.body = self.buff[:still_need]
                self.buff = self.buff[still_need:]
                still_need = 0
            
                # the code below processes the completed datapack
                if dp.method == 'file':
                    os.rename('tmp/' + dp.head['filename'], dp.head['filename'])
                    print('Received file %s from %s' % (dp.head['filename'], self.id), dp)
                send_queue.put(dp)

        
        # the code below cleans up the closed connection
        if self.f:
            self.f.close()
            self.f = None
        self.conn.close()
        self.netowrk_controller.del_connection(self)
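The buffer arithmetic above is the heart of the framing, so here is the same accounting in isolation (a standalone sketch with illustrative names): bytes arrive in arbitrary chunks, a body of known length is assembled, and any surplus stays buffered for the next datapack.

def assemble(chunks, length):
    buff, body, still_need = b'', b'', length
    for data in chunks:
        buff += data
        if still_need > len(buff):
            body += buff
            still_need -= len(buff)
            buff = b''
        else:
            body += buff[:still_need]
            buff = buff[still_need:]
            return body, buff  # buff now holds the start of the next datapack
    return body, buff  # ran out of data: body is incomplete

assert assemble([b'he', b'llo wo', b'rldNEXT'], 11) == (b'hello world', b'NEXT')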
Example no. 18
def main():
    while True:
        try:
            _main()
        except Exception as e:
            print('Error in %s, %s: %s' % (__name__, type(e), str(e)))
Example no. 19
def print_reply_func():
    while True:
        dp = receive_queue.get()
        dp.encode()
        print(dp.encode_data.decode())
Example no. 20
import adb
import config
import run_one

# Read config
config = config.ReadConfig("./python/config.json")
config.print()

# Push model files into adb
adb_runner = adb.AdbRunner("./python/output.txt")
adb_runner.push_files("./resource/<model name>")

# Run One
one_runner = run_one.OneRunner(config)

#adb_runner.run("""ls -all
#echo "hi"
#echo "nice to meet you"
#""", clean = True)

#adb_runner.close()
# run
Example no. 21
def main():
    if config.output_dir is None:
        config.output_dir = 'output'
    if config.restart_training:
        shutil.rmtree(config.output_dir, ignore_errors=True)
    if not os.path.exists(config.output_dir):
        os.makedirs(config.output_dir)

    logger = setup_logger(os.path.join(config.output_dir, 'train_log'))
    logger.info(config.print())

    torch.manual_seed(config.seed)  # set the random seed for the CPU
    if config.gpu_id is not None and torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        logger.info('train with gpu {} and pytorch {}'.format(config.gpu_id, torch.__version__))
        device = torch.device("cuda:0")
        torch.cuda.manual_seed(config.seed)  # set the random seed for the current GPU
        torch.cuda.manual_seed_all(config.seed)  # set the random seed for all GPUs
    else:
        logger.info('train with cpu and pytorch {}'.format(torch.__version__))
        device = torch.device("cpu")

    train_data = MyDataset(config.trainroot, config.MIN_LEN, config.MAX_LEN, transform=transforms.ToTensor())
    train_loader = Data.DataLoader(dataset=train_data, batch_size=config.train_batch_size, shuffle=True,
                                   num_workers=int(config.workers))

    writer = SummaryWriter(config.output_dir)
    model = CTPN_Model(pretrained=config.pretrained)
    if not config.pretrained and not config.restart_training:
        model.apply(weights_init)

    num_gpus = torch.cuda.device_count()
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.to(device)
    dummy_input = torch.zeros(1, 3, 600, 800).to(device)
    writer.add_graph(model=model, input_to_model=dummy_input)
    criterion = CTPNLoss(device)
    # optimizer = torch.optim.SGD(model.parameters(), lr=config.lr, momentum=0.99)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    if config.checkpoint != '' and not config.restart_training:
        print('Loading Checkpoint...')
        start_epoch = load_checkpoint(config.checkpoint, model, logger, device)
        start_epoch += 1
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, config.lr_decay_step, gamma=config.lr_gamma,
                                                         last_epoch=start_epoch)
    else:
        start_epoch = config.start_epoch
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, config.lr_decay_step, gamma=config.lr_gamma)

    all_step = len(train_loader)
    logger.info('train dataset has {} samples, {} in dataloader'.format(train_data.__len__(), all_step))
    epoch = 0
    best_model = {'loss': float('inf')}
    try:
        for epoch in range(start_epoch, config.epochs):
            start = time.time()
            train_loss, lr = train_epoch(model, optimizer, scheduler, train_loader, device, criterion, epoch, all_step,
                                         writer, logger)
            logger.info('[{}/{}], train_loss: {:.4f}, time: {:.4f}, lr: {}'.format(
                epoch, config.epochs, train_loss, time.time() - start, lr))
            # if (0.3 < train_loss < 0.4 and epoch % 1 == 0) or train_loss < 0.3:
            if epoch % 10 == 0 or train_loss < best_model['loss']:
                net_save_path = '{}/PSENet_{}_loss{:.6f}.pth'.format(config.output_dir, epoch, train_loss)
                save_checkpoint(net_save_path, model, optimizer, epoch, logger)
                if train_loss < best_model['loss']:
                    best_model['loss'] = train_loss
                    if 'model' in best_model:
                        os.remove(best_model['model'])
                    best_model['model'] = net_save_path
                    shutil.copy(best_model['model'],
                                '{}/best_loss{:.6f}.pth'.format(config.output_dir, best_model['loss']))
        writer.close()
    except KeyboardInterrupt:
        pass
    finally:
        if best_model.get('model'):  # guard: no checkpoint may have been saved yet
            shutil.copy(best_model['model'], '{}/best_loss{:.6f}.pth'.format(config.output_dir, best_model['loss']))
            logger.info(best_model)
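setup_logger, save_checkpoint, and load_checkpoint are not shown in these excerpts. A hedged sketch of the two checkpoint helpers, inferred purely from their call sites here (the real checkpoint layout may differ):

import torch

def save_checkpoint(path, model, optimizer, epoch, logger):
    # persist everything the call sites above expect to restore
    torch.save({'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch}, path)
    logger.info('saved checkpoint to %s' % path)

def load_checkpoint(path, model, logger, device, optimizer=None):
    ckpt = torch.load(path, map_location=device)
    model.load_state_dict(ckpt['state_dict'])
    if optimizer is not None:  # one call site passes the optimizer, another does not
        optimizer.load_state_dict(ckpt['optimizer'])
    logger.info('loaded checkpoint from %s' % path)
    return ckpt['epoch']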
Example no. 22
    def run_as_server(self):
        _padding_to_convert = os.listdir('res/ffmpeg_tmp')
        padding_to_convert = []
        for file in _padding_to_convert:
            file = 'res/ffmpeg_tmp/' + file
            padding_to_convert.append(file)
        already_in_convert = []
        finished_convert = []  # output filenames

        while True:
            dp = receive_queue.get()

            if dp.method == 'post' and dp.body == b'status':
                result = ''
                result += 'padding_to_convert ' + str(
                    padding_to_convert) + '\n'
                result += 'already_in_convert ' + str(
                    already_in_convert) + '\n'
                result += 'finished_convert ' + str(finished_convert) + '\n'
                result += 'convert_task_queue size ' + str(
                    self.convert_task_queue.qsize())
                ndp = dp.reply()
                ndp.body = result.encode()
                send_queue.put(ndp)

            elif dp.method == 'post' and dp.body == b'reset':
                padding_to_convert = already_in_convert  # requeue in-flight files (note: replaces the pending list)
                already_in_convert = []

            elif dp.method == 'post' and dp.body == b'stop':
                break

            elif dp.method == 'post' and dp.body == b'pause':
                self.pause = True

            elif dp.method == 'post' and dp.body == b'continue':
                self.pause = False

            elif dp.method == 'get':
                if self.pause:
                    ndp = dp.reply()
                    ndp.method = 'post'
                    ndp.body = b'disable'
                    send_queue.put(ndp)
                    continue
                if padding_to_convert:
                    filename = padding_to_convert.pop(0)
                    already_in_convert.append(filename)

                    print('%s gets %s to convert' % (dp.head['id'], filename),
                          dp)

                    ndp = dp.reply()
                    ndp.method = 'file'
                    ndp.head['filename'] = filename

                    send_queue.put(ndp)

                else:
                    if not already_in_convert:  # finished
                        break
                    else:  # waiting for final convert
                        ndp = dp.reply()
                        ndp.method = 'post'
                        ndp.body = b'disable'
                        send_queue.put(ndp)

            elif dp.method == 'file':
                old_filename = dp.head['old_filename']
                filename = dp.head['filename']

                os.remove(old_filename)
                already_in_convert.remove(old_filename)
                finished_convert.append(filename)

                total = (len(padding_to_convert) + len(already_in_convert)
                         + len(finished_convert))
                print('Processing...(%d) %d/%d %s%%' % (
                    len(already_in_convert),
                    len(finished_convert),
                    total,
                    round(len(finished_convert) / total * 100, 2)))

                if not padding_to_convert and not already_in_convert:  # final process
                    break

        print('Mapreduce finished')
Example no. 23
    def mainloop(self):
        _create_floder('res/ffmpeg_tmp')
        _create_floder('res/ffmpeg_finished')
        _create_floder('res/ffmpeg_task')
        _create_floder('res/ffmpeg_old')
        _create_floder('res/ffmpeg_complet')

        while True:
            dp = receive_queue.get()

            if dp.method == 'post' and dp.body == b'concat':
                self.org_filename = dp.head['filename']
                self.object_filename = self.org_filename[:-4] + '.mkv'
                self.concat_func()

            if dp.method == 'post' and dp.body == b'autostart':
                filelist = os.listdir('res/ffmpeg_task')
                self.tasklist = []
                for file in filelist:
                    if len(file) > 3:
                        ext = file[-4:]
                        if ext in ['.mp4', '.MP4', '.mkv', '.MKV']:
                            self.tasklist.append('res/ffmpeg_task/' + file)
                dp = Datapack()
                dp.app = 'ffmpeg'
                dp.body = b'start'
                dp.head['filename'] = self.tasklist.pop(0)
                self.autostart = dp.head['filename']
                send_queue.put(dp)

            if dp.method == 'post' and dp.body == b'start':  # run this instance as the ffmpeg server
                if dp.head.get('concat'):
                    if dp.head['concat'] == 'true':
                        self.concat = True
                    elif dp.head['concat'] == 'false':
                        self.concat = False
                    else:
                        print('unknown concat value')
                        continue
                else:
                    self.concat = True

                if self.concat:
                    self.org_filename = dp.head['filename']
                    self.object_filename = 'res/ffmpeg_complet/' + os.path.basename(
                        self.org_filename)[:-4] + '.mkv'

                if self.concat:
                    ndp = dp.reply()
                    ndp.body = 'Splitting file %s' % dp.head['filename']
                    ndp.body = ndp.body.encode()
                    send_queue.put(ndp)

                    cmd = 'ffmpeg -i "' + os.path.normpath(
                        dp.head['filename']) + '" -c copy \
                        -f segment -segment_time 20 -reset_timestamps 1 -y \
                        "res/ffmpeg_tmp/' + '%d' + '.mkv"'

                    os.system(cmd)

                self.run_as_server()

                if self.concat:
                    self.concat_func()

                print('All process finished')

            elif dp.method == 'post' and dp.body == b'enable':  # client mode
                self.status = 1
                self.server = dp.head['server']
                self.convert_func()

            elif dp.method == 'post' and dp.body == b'status':
                result = 'ffmpeg not working'
                ndp = dp.reply()
                ndp.body = result.encode()

                send_queue.put(ndp)

            elif dp.method == 'get':  # tell other clients to disable
                ndp = dp.reply()
                ndp.method = 'post'
                ndp.body = b'disable'
                print('telling %s to disable' % dp.head['id'])

                send_queue.put(ndp)
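For a hypothetical input res/ffmpeg_task/clip.mp4, the splitting command assembled in the b'start' branch expands to the shell invocation below: -c copy avoids re-encoding, and the segment muxer cuts a new piece roughly every 20 seconds at keyframes.

ffmpeg -i "res/ffmpeg_task/clip.mp4" -c copy -f segment -segment_time 20 -reset_timestamps 1 -y "res/ffmpeg_tmp/%d.mkv"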
Example no. 24
def main():
    if config.output_dir is None:
        config.output_dir = 'output'
    if config.restart_training:
        shutil.rmtree(config.output_dir, ignore_errors=True)
    if not os.path.exists(config.output_dir):
        os.makedirs(config.output_dir)

    logger = setup_logger(os.path.join(config.output_dir, 'train_log'))
    logger.info(config.print())

    torch.manual_seed(config.seed)  # set the random seed for the CPU
    if config.gpu_id is not None and torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        logger.info('train with gpu {} and pytorch {}'.format(
            config.gpu_id, torch.__version__))
        device = torch.device("cuda:0")
        torch.cuda.manual_seed(config.seed)  # set the random seed for the current GPU
        torch.cuda.manual_seed_all(config.seed)  # set the random seed for all GPUs
    else:
        logger.info('train with cpu and pytorch {}'.format(torch.__version__))
        device = torch.device("cpu")

    train_data = TibetanDataset(config.json_path,
                                data_shape=config.data_shape,
                                n=config.n,
                                m=config.m,
                                transform=transforms.ToTensor(),
                                base_path=config.base_path)
    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=config.train_batch_size,
                                   shuffle=True,
                                   num_workers=int(config.workers))

    writer = SummaryWriter(config.output_dir)
    model = PSENet(backbone=config.backbone,
                   pretrained=config.pretrained,
                   result_num=config.n,
                   scale=config.scale)
    if not config.pretrained and not config.restart_training:
        model.apply(weights_init)

    num_gpus = torch.cuda.device_count()
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.to(device)
    # dummy_input = torch.autograd.Variable(torch.Tensor(1, 3, 600, 800).to(device))
    # writer.add_graph(models=models, input_to_model=dummy_input)
    criterion = PSELoss(Lambda=config.Lambda,
                        ratio=config.OHEM_ratio,
                        reduction='mean')
    # optimizer = torch.optim.SGD(models.parameters(), lr=config.lr, momentum=0.99)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    if config.checkpoint != '' and not config.restart_training:
        start_epoch = load_checkpoint(config.checkpoint, model, logger, device,
                                      optimizer)
        start_epoch += 1
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            config.lr_decay_step,
            gamma=config.lr_gamma,
            last_epoch=start_epoch)
    else:
        start_epoch = config.start_epoch
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                         config.lr_decay_step,
                                                         gamma=config.lr_gamma)

    all_step = len(train_loader)
    logger.info('train dataset has {} samples, {} in dataloader'.format(
        train_data.__len__(), all_step))
    epoch = 0
    best_model = {'recall': 0, 'precision': 0, 'f1': 0, 'models': ''}
    try:
        for epoch in range(start_epoch, config.epochs):
            start = time.time()
            train_loss, lr = train_epoch(model, optimizer, scheduler,
                                         train_loader, device, criterion,
                                         epoch, all_step, writer, logger)
            logger.info(
                '[{}/{}], train_loss: {:.4f}, time: {:.4f}, lr: {}'.format(
                    epoch, config.epochs, train_loss,
                    time.time() - start, lr))
            # net_save_path = '{}/PSENet_{}_loss{:.6f}.pth'.format(config.output_dir, epoch,
            #                                                                               train_loss)
            # save_checkpoint(net_save_path, models, optimizer, epoch, logger)
            if (0.3 < train_loss < 0.4 and epoch % 4 == 0) or train_loss < 0.3:
                recall, precision, f1 = merge_eval(model=model,
                                                   save_path=os.path.join(
                                                       config.output_dir,
                                                       'output'),
                                                   test_path=config.testroot,
                                                   device=device,
                                                   base_path=config.base_path,
                                                   use_sub=config.use_sub)
                logger.info(
                    'test: recall: {:.6f}, precision: {:.6f}, f1: {:.6f}'.
                    format(recall, precision, f1))

                net_save_path = '{}/PSENet_{}_loss{:.6f}_r{:.6f}_p{:.6f}_f1{:.6f}.pth'.format(
                    config.output_dir, epoch, train_loss, recall, precision,
                    f1)
                save_checkpoint(net_save_path, model, optimizer, epoch, logger)
                if f1 > best_model['f1']:
                    best_path = glob.glob(config.output_dir + '/Best_*.pth')
                    for b_path in best_path:
                        if os.path.exists(b_path):
                            os.remove(b_path)

                    best_model['recall'] = recall
                    best_model['precision'] = precision
                    best_model['f1'] = f1
                    best_model['models'] = net_save_path

                    best_save_path = '{}/Best_{}_r{:.6f}_p{:.6f}_f1{:.6f}.pth'.format(
                        config.output_dir, epoch, recall, precision, f1)
                    if os.path.exists(net_save_path):
                        shutil.copyfile(net_save_path, best_save_path)
                    else:
                        save_checkpoint(best_save_path, model, optimizer,
                                        epoch, logger)

                    pse_path = glob.glob(config.output_dir + '/PSENet_*.pth')
                    for p_path in pse_path:
                        if os.path.exists(p_path):
                            os.remove(p_path)

                writer.add_scalar(tag='Test/recall',
                                  scalar_value=recall,
                                  global_step=epoch)
                writer.add_scalar(tag='Test/precision',
                                  scalar_value=precision,
                                  global_step=epoch)
                writer.add_scalar(tag='Test/f1',
                                  scalar_value=f1,
                                  global_step=epoch)
        writer.close()
    except KeyboardInterrupt:
        save_checkpoint('{}/final.pth'.format(config.output_dir), model,
                        optimizer, epoch, logger)
    finally:
        if best_model['models']:
            logger.info(best_model)
Example no. 25
def main():
    if config.output_dir is None:
        config.output_dir = 'output'
    if config.restart_training:
        shutil.rmtree(config.output_dir, ignore_errors=True)
    if not os.path.exists(config.output_dir):
        os.makedirs(config.output_dir)

    logger = setup_logger(os.path.join(config.output_dir, 'train_log'))
    logger.info(config.print())

    torch.manual_seed(config.seed)  # set the random seed for the CPU
    if config.gpu_id is not None and torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        logger.info('train with gpu {} and pytorch {}'.format(
            config.gpu_id, torch.__version__))
        device = torch.device("cuda:0")
        torch.cuda.manual_seed(config.seed)  # set the random seed for the current GPU
        torch.cuda.manual_seed_all(config.seed)  # set the random seed for all GPUs
    else:
        logger.info('train with cpu and pytorch {}'.format(torch.__version__))
        device = torch.device("cpu")

    train_data = MyDataset(config.trainroot,
                           data_shape=config.data_shape,
                           n=config.n,
                           m=config.m,
                           transform=transforms.ToTensor())
    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=config.train_batch_size,
                                   shuffle=True,
                                   num_workers=int(config.workers),
                                   drop_last=True)
    writer = SummaryWriter(config.output_dir)
    model = PSENet(backbone=config.backbone,
                   pretrained=config.pretrained,
                   result_num=config.n,
                   scale=config.scale)
    if not config.pretrained and not config.restart_training:
        model.apply(weights_init)
    # load the pretrained weights from drive
    state_dict = torch.load(config.pretrained_path)
    model.load_state_dict(state_dict)

    num_gpus = torch.cuda.device_count()
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.to(device)
    # dummy_input = torch.autograd.Variable(torch.Tensor(1, 3, 600, 800).to(device))
    # writer.add_graph(models=models, input_to_model=dummy_input)
    criterion = PSELoss(Lambda=config.Lambda,
                        ratio=config.OHEM_ratio,
                        reduction='mean')
    # optimizer = torch.optim.SGD(models.parameters(), lr=config.lr, momentum=0.99)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    if config.checkpoint != '' and not config.restart_training:
        start_epoch = load_checkpoint(config.checkpoint, model, logger, device)
        start_epoch += 1
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            config.lr_decay_step,
            gamma=config.lr_gamma,
            last_epoch=start_epoch)
    else:
        start_epoch = config.start_epoch
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                         config.lr_decay_step,
                                                         gamma=config.lr_gamma)

    all_step = len(train_loader)
    logger.info('train dataset has {} samples, {} in dataloader'.format(
        train_data.__len__(), all_step))
    epoch = 0
    try:
        for epoch in range(start_epoch, config.epochs):
            start = time.time()
            train_loss, lr = train_epoch(model, optimizer, scheduler,
                                         train_loader, device, criterion,
                                         epoch, all_step, writer, logger)
            logger.info(
                '[{}/{}], train_loss: {:.4f}, time: {:.4f}, lr: {}'.format(
                    epoch, config.epochs, train_loss,
                    time.time() - start, lr))
            # net_save_path = '{}/PSENet_{}_loss{:.6f}.pth'.format(config.output_dir, epoch,
            #                                                                               train_loss)
            # save_checkpoint(net_save_path, models, optimizer, epoch, logger)

            state_dict = model.state_dict()
            # replace the weight file
            filename = '{}/PSENet_resnet50.pth'.format(config.output_dir)
            if os.path.exists(filename):
                os.unlink(filename)
            torch.save(state_dict, filename)
        writer.close()
    except KeyboardInterrupt:
        filename = '{}/PSENet_resnet50.pth'.format(config.output_dir)
        if os.path.exists(filename):
            os.unlink(filename)
        torch.save(state_dict, filename)

        save_checkpoint('{}/final.pth'.format(config.output_dir), model,
                        optimizer, epoch, logger)