Example #1
    def read_label(self, target_type, file_size):
        chara_target_path = os.path.join(self.target_path, 'label_raw',
                                         target_type)
        path_check(chara_target_path)

        result = []
        count = 0
        print(f'Reading {len(self.paths)} labels')

        for i in range(len(self.paths)):
            with open(self.paths[i], 'r', encoding='utf-8') as f:
                data = f.readlines()
                if target_type == 'chara':
                    # character-level label: first line, newline and spaces stripped
                    result.append(data[0].replace('\n', '').replace(' ', ''))
                else:
                    # token-level label: second line, split on spaces
                    result.append(data[1].replace('\n', '').split(' '))

            # dump a chunk every file_size labels; flush the remainder at the end
            if (i + 1) % file_size == 0 or (i + 1) == len(self.paths):
                with open(os.path.join(chara_target_path,
                                       f'label_raw_{count}.pkl'), 'wb') as f:
                    pickle.dump(result, f)
                result = []
                count += 1
                print(f'{i + 1}/{len(self.paths)} labels processed')
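
Every example in this listing leans on a path_check helper that is never shown. As a reading aid only, here is a minimal sketch of what it plausibly does, assuming it simply creates the target directory when missing; the optional second argument mirrors the two-argument calls in Examples #2 and #3, and the body below is a guess, not the project's actual code:

import os

def path_check(path, name=''):
    # Assumption: ensure the output directory exists before anything writes to it.
    os.makedirs(os.path.join(path, name), exist_ok=True)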
Example #2
def tv_series(path, name_list, mode):
    print('\n\nchecking tv series...\n')

    for name in name_list:
        path_check(path, name)

        # e.g. name 'Some Show Season 2' -> series 'Some Show', season 2
        series = ' '.join(name.split()[:-2])
        season = int(name.split()[-1])
        ep_season_occurrence = 's' + zero_prefix(season) + 'e'

        dir_contents = [ep.lower() for ep in os.listdir(path + name)]
        dir_contents_idx = [ep.find(ep_season_occurrence.lower()) for ep in dir_contents]
        # the two digits right after the four-character 'sXXe' marker are the episode number
        dir_episodes = [ep[x + 4:x + 6] for x, ep in zip(dir_contents_idx, dir_contents)]
        dir_episodes.sort(key=int)

        new_ep = 1 if not dir_contents else int(dir_episodes[-1]) + 1
        tv_episode_check(series, season, 'https://en.wikipedia.org/wiki/',
                         mode, new_ep)
    print('\n###########################################################')
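
zero_prefix is likewise not shown. Since the marker is built as 's' + zero_prefix(season) + 'e' and the slice ep[x + 4:x + 6] assumes the marker is exactly four characters long, it almost certainly pads the season number to two digits; a sketch under that assumption:

def zero_prefix(n):
    # Assumption: left-pad to two digits, e.g. 5 -> '05', 12 -> '12'.
    return str(n).zfill(2)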
Example #3
def anime(path, name_list):
    print('\n\nchecking anime...\n')

    for name in name_list:
        path_check(path, name)

        # file names are bare episode numbers, e.g. '12.mp4' -> '12'
        dir_contents = [ep.split('.')[0] for ep in os.listdir(path + name)]
        dir_contents.sort(key=int)
        new_ep = 1 if not dir_contents else int(dir_contents[-1]) + 1
        anime_episode_check(name, 'http://www.chia-anime.tv/episode/', new_ep)
    print('\n###########################################################')
Example #4
    def build_dict(self):
        for root, dirs, files in os.walk(self.path):
            for file in files:
                with open(os.path.join(root, file), 'rb') as f:
                    for item in pickle.load(f):
                        self.add_to_dict(item)
        print('Dictionary built')
        dict_path = os.path.join(self.root_path, 'dict')
        path_check(dict_path)
        with open(os.path.join(dict_path, 'dict.pkl'), 'wb') as f:
            pickle.dump(self.dict, f)
        print('Dictionary saved')
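
The add_to_dict method called above is not part of the snippet. Because Example #7 later converts labels to ids, a plausible reading is an incremental token-to-id table; the self.dict field comes from the snippet, but the logic below is an assumption:

    def add_to_dict(self, item):
        # Assumption: give each unseen token the next free id.
        for token in item:
            if token not in self.dict:
                self.dict[token] = len(self.dict)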
Example #5
    def read_audio(self, file_size):
        target_dir = os.path.join(self.target_path, 'audio_raw')
        path_check(target_dir)
        result = []
        count = 0
        print(f'Reading {len(self.paths)} audio clips')
        for i in range(len(self.paths)):
            wave_data, sr = librosa.load(self.paths[i], sr=self.sr)
            result.append(wave_data)
            # dump a chunk every file_size clips; flush the remainder at the end
            if (i + 1) % file_size == 0 or (i + 1) == len(self.paths):
                with open(os.path.join(target_dir, f'audio_raw_{count}.pkl'),
                          'wb') as f:
                    pickle.dump(result, f)
                result = []
                count += 1
                print(f'{i + 1}/{len(self.paths)} audio clips processed')
Example #6
    def start_mfcc(self):
        audio_path = os.path.join(self.root_path, 'audio_raw')
        save_path1 = os.path.join(self.root_path, 'mfcc')
        save_path2 = os.path.join(self.root_path, 'x_handled')
        path_check(save_path1)
        path_check(save_path2)

        count = 0
        for root, dirs, files in os.walk(audio_path):
            print(f'Processing {len(files)} raw audio files')
            for file in files:
                with open(os.path.join(root, file), 'rb') as a:
                    audio_data = pickle.load(a)
                    mfcc_data = self.get_mfcc(audio_data)
                    with open(os.path.join(save_path1, f'mfcc_data_{count}.pkl'), 'wb') as b:
                        pickle.dump(mfcc_data, b)
                count += 1
                print(f'File {count} processed')
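
self.get_mfcc is not included in the snippet. A minimal sketch of what it might look like with librosa, assuming each pickle holds a list of waveforms and the sample rate lives on the object (self.sr, as in Example #5); n_mfcc=13 is an arbitrary choice for illustration:

    def get_mfcc(self, audio_data):
        # Assumption: one MFCC matrix per waveform in the pickled batch.
        return [
            librosa.feature.mfcc(y=wave, sr=self.sr, n_mfcc=13)
            for wave in audio_data
        ]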
Example #7
    def start_handling(self):
        handled_path = os.path.join(self.root_path, 'label_handled',
                                    self.label_type)
        y_path = os.path.join(self.root_path, 'y_handled', self.label_type)
        path_check(handled_path)
        path_check(y_path)

        count = 0
        raw_path = os.path.join(self.root_path, 'label_raw', self.label_type)
        for root, dirs, files in os.walk(raw_path):
            for file in files:
                # convert one pickled batch of raw labels to id sequences
                with open(os.path.join(root, file), 'rb') as f:
                    label_data, label_len = self.label_to_id(pickle.load(f))
                with open(os.path.join(handled_path,
                                       f'label_handled_{count}.pkl'), 'wb') as f:
                    pickle.dump(label_data, f)
                with open(os.path.join(y_path, f'label_len_{count}.pkl'),
                          'wb') as f:
                    pickle.dump(label_len, f)
                count += 1
        print('Label conversion complete')
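
label_to_id is not shown either. Given that it returns both the converted labels and their lengths, which are pickled separately as y data, a sketch assuming the token-to-id table from Example #4:

    def label_to_id(self, labels):
        # Assumption: map tokens to ids and record each sequence's length.
        ids = [[self.dict[token] for token in label] for label in labels]
        lens = [len(seq) for seq in ids]
        return ids, lens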

Example #8
# arg_type: DATA, MODEL, SOLVER
for arg_type in cfg_dict.keys():
    for arg_name in cfg_dict[arg_type].keys():
        arg_value = cfg_dict[arg_type][arg_name]
        # override pred_w from the command line when given
        if arg_name == 'pred_w' and args.pred_w:
            arg_value = args.pred_w
        # skip entries left at their default
        if arg_value != 'default':
            command += '--%s %s ' % (arg_name, str(arg_value))

# define and check 'ckpt'
ckpt = os.path.join(curPath, 'ckpt', '%s/%s' % (args.cfg, args.ckpt_suffix))
utils.path_check(ckpt)
command += '--ckpt %s ' % (ckpt)

# load extra unknown args passed on the command line
for i, arg in enumerate(unknown_args):
    if arg.startswith('-'):
        arg_name, arg_value = arg, unknown_args[i + 1]
        command += '%s %s ' % (arg_name, str(arg_value))

if args.evaluate:
    command += '>%s/eval_log' % (ckpt)
else:
    command += '>%s/log' % (ckpt)

print(command)
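
To make the flattening loop above concrete, here is a hypothetical cfg_dict and the flags it would contribute to the command string; the keys and values are invented for illustration, and only the DATA/MODEL/SOLVER grouping comes from the snippet:

cfg_dict = {
    'DATA': {'batch_size': 32, 'num_workers': 'default'},
    'MODEL': {'pred_w': 0.5},
    'SOLVER': {'lr': 0.001},
}
# The loop would append: --batch_size 32 --pred_w 0.5 --lr 0.001
# ('default' entries are skipped, and pred_w is overridden by args.pred_w when set).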
Example #9
    # ===============================================================================
    parser.add_argument("--sequence-length", type=int, default=35)
    parser.add_argument("--model", type=str, default="resnet")
    parser.add_argument("--num-layers", type=int, default=1)
    parser.add_argument("--hidden-size", type=int, default=512)
    parser.add_argument("--bidirectional", action="store_true")
    parser.add_argument("--learning-rate", type=float, default=1e-4)
    parser.add_argument("--scheduler-step-size", type=int, default=10)
    parser.add_argument("--scheduler-gamma", type=float, default=0.9)
    parser.add_argument("--way", type=int, default=5)
    parser.add_argument("--shot", type=int, default=1)
    parser.add_argument("--query", type=int, default=5)
    args = parser.parse_args()

    # path to save
    path_check(args.save_path)

    # path to tensorboard
    writer = SummaryWriter(args.tensorboard_path)

    # print args and save it in the save_path
    args_print_save(args)

    # this script supports only resnet18 and r2plus1d18
    assert args.model in ["resnet", "r2plus1d"], \
        "'{}' is not a valid model.".format(args.model)

    train_dataset = UCF101(
        model=args.model,
        frames_path=args.frames_path,
        labels_path=args.labels_path,
Example #10
import os

from torch.utils.data import DataLoader
from torchvision import transforms

from PairImageFolder import DownsizedPairImageFolder
from EnhanceNet import EnhanceNet
from utils import train_net, save_result_img, path_check

if __name__ == '__main__':

    # dataset
    train_data = DownsizedPairImageFolder('./dataset/train', transform=transforms.ToTensor())
    test_data = DownsizedPairImageFolder('./dataset/test', transform=transforms.ToTensor())

    # dataloader
    batch_size = 32
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)


    # network
    net = EnhanceNet()

    # training
    train_net(net, train_loader, test_loader, device='cuda:0')

    # save result
    dst = './result'
    f_name = 'cnn_upscale.jpg'

    path_check(dst)
    save_result_img(net, test_data, os.path.join(dst, f_name))
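
save_result_img is imported from utils but not shown. A sketch of what it might do, assuming DownsizedPairImageFolder yields (low-res, high-res) tensor pairs and that the network is still on cuda:0 after training; all of these details are assumptions, not the repository's code:

import torch
from torchvision.utils import save_image

def save_result_img(net, test_data, path, device='cuda:0'):
    # Assumption: upscale the first low-res test image and write it to disk.
    net.eval()
    low_res, _ = test_data[0]
    with torch.no_grad():
        out = net(low_res.unsqueeze(0).to(device)).squeeze(0).clamp(0, 1)
    save_image(out.cpu(), path)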