Example #1
    def vis(self):

        self.model.eval()

        gallery_path = data.testset.imgs
        query_path = data.queryset.imgs

        # Extract feature
        print('extract features, this may take a few minutes')
        #query_feature = extract_feature(model, tqdm([(torch.unsqueeze(data.query_image, 0), 1)]))
        query_feature = extract_feature(model, tqdm(data.query_loader))
        gallery_feature = extract_feature(model, tqdm(data.test_loader))
        print(query_feature.size())

        for query_index in range(query_feature.size(0)):
            # sort images
            query_img_path = query_path[query_index]
            query_feature_now = query_feature[query_index].unsqueeze(1)
            print(query_feature_now.size())
            #query_feature = query_feature.view(-1, 1)
            #print(query_feature.size())
            score = torch.mm(gallery_feature, query_feature_now)
            score = score.squeeze(1).cpu()
            score = score.numpy()

            index = np.argsort(score)  # from small to large
            index = index[::-1]  # from large to small

            # # Remove junk images
            # junk_index = np.argwhere(gallery_label == -1)
            # mask = np.in1d(index, junk_index, invert=True)
            # index = index[mask]

            # Visualize the rank result
            fig = plt.figure(figsize=(16, 4))

            ax = plt.subplot(1, 11, 1)
            ax.axis('off')
            plt.imshow(plt.imread(query_img_path))
            ax.set_title('query')

            #print('Top 10 images are as follows:')

            for i in range(10):
                img_path = gallery_path[index[i]]
                #print(img_path)

                ax = plt.subplot(1, 11, i + 2)
                ax.axis('off')
                plt.imshow(plt.imread(img_path))
                ax.set_title(img_path.split('/')[-1][:9])

            fig.savefig(
                "/home/wangminjie/Desktop/wmj/projects/Part-reID_2/result/detect/{}.png"
                .format(query_img_path.split('/')[-1][:9]))
            print('result saved to {}.png'.format(
                query_img_path.split('/')[-1][:9]))
        print("task end")
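The re-ID snippets in this collection all call an extract_feature(model, loader) helper whose body is not shown. Below is a minimal sketch of what such a helper might look like, assuming the model maps an image batch to embedding vectors; it is an illustrative reconstruction, not the repository's actual code (the real helper may also average flipped views or multiple scales):

import torch
import torch.nn.functional as F

def extract_feature(model, loader, device='cuda'):
    # Sketch only: forward every batch and L2-normalize the embeddings, so
    # the dot products used for ranking above behave as cosine similarities.
    features = []
    with torch.no_grad():
        for inputs, _ in loader:
            outputs = model(inputs.to(device))
            features.append(F.normalize(outputs, dim=1).cpu())
    return torch.cat(features, dim=0)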
Example #2
def run_HetGNN(model, hg, het_graph, config):
    # het_graph is used to sample neighbours
    hg = hg.to('cpu')
    category = config.category
    train_mask = hg.nodes[category].data.pop('train_mask')
    test_mask = hg.nodes[category].data.pop('test_mask')
    train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
    test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
    labels = hg.nodes[category].data.pop('label')
    emd = hg.nodes[category].data['dw_embedding']
    train_batch = load_link_pred('./a_a_list_train.txt')
    test_batch = load_link_pred('./a_a_list_test.txt')
    # HetGNN Sampler
    batch_sampler = SkipGramBatchSampler(hg, config.batch_size,
                                         config.window_size)
    neighbor_sampler = NeighborSampler(het_graph, hg.ntypes,
                                       batch_sampler.num_nodes, config.device)
    collator = HetGNNCollator(neighbor_sampler, hg)
    dataloader = DataLoader(batch_sampler,
                            collate_fn=collator.collate_train,
                            num_workers=config.num_workers)

    opt = th.optim.Adam(model.parameters())

    pred = ScorePredictor()
    dataloader_it = iter(dataloader)
    for i in range(config.max_epoch):
        model.train()
        for batch_id in tqdm.trange(config.batches_per_epoch):
            positive_graph, negative_graph, blocks = next(dataloader_it)
            blocks = [b.to(config.device) for b in blocks]
            positive_graph = positive_graph.to(config.device)
            negative_graph = negative_graph.to(config.device)
            # extract input features for every node type
            input_features = extract_feature(blocks[0], hg.ntypes)

            x = model(blocks[0], input_features)
            loss = compute_loss(pred(positive_graph, x),
                                pred(negative_graph, x))

            opt.zero_grad()
            loss.backward()
            opt.step()
        print('Epoch {:05d} | Train - Loss: {:.4f}'.format(i, loss.item()))
        input_features = extract_feature(het_graph, hg.ntypes)
        x = model(het_graph, input_features)
        author_link_prediction(x['author'].to('cpu').detach(), train_batch,
                               test_batch)
        micro_f1, macro_f1 = Hetgnn_evaluate(
            x[config.category].to('cpu').detach(), labels, train_idx, test_idx)
        print('<Classification>     Micro-F1 = %.4f, Macro-F1 = %.4f' %
              (micro_f1, macro_f1))
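compute_loss above receives edge scores from the positive and the negative graph. A common choice for this skip-gram-style objective, sketched here as an assumption rather than the repository's actual loss, is binary cross-entropy with positives labelled 1 and sampled negatives labelled 0:

import torch as th
import torch.nn.functional as F

def compute_loss(pos_score, neg_score):
    # Hypothetical sketch: positive edges should score high, negatives low.
    score = th.cat([pos_score, neg_score])
    label = th.cat([th.ones_like(pos_score), th.zeros_like(neg_score)])
    return F.binary_cross_entropy_with_logits(score, label)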
Example #3
def test_feature_extraction():
    print("Testing Feature Extraction with test file..")
    result = extract_feature(list_of_files[0])
    if len(result) > 1:
        print("FEATURE EXTRACTION PASSED")
    else:
        print("FEATURE EXTRACTION FAILED")
    print("Testing Feature Extraction with empty input..")
    result = extract_feature("")
    if result == "File Doesn't Exist":
        print("FEATURE EXTRACTION PASSED")
    else:
        print("FEATURE EXTRACTION FAILED")
Example #4
def evaluate(model):
    model.eval()

    with open(test_file, 'r') as file:
        lines = file.readlines()

    angles = []

    start = time.time()
    with torch.no_grad():
        for line in tqdm(lines):
            tokens = line.split()
            file0 = tokens[0]
            mel0 = extract_feature(input_file=file0,
                                   feature='fbank',
                                   dim=hp.n_mels,
                                   cmvn=True)
            mel0 = build_LFR_features(mel0, m=hp.LFR_m, n=hp.LFR_n)
            mel0 = torch.unsqueeze(torch.from_numpy(mel0), dim=0)
            mel0 = mel0.to(hp.device)
            output = model(mel0)[0]
            feature0 = output.cpu().numpy()

            file1 = tokens[1]
            mel1 = extract_feature(input_file=file1,
                                   feature='fbank',
                                   dim=hp.n_mels,
                                   cmvn=True)
            mel1 = build_LFR_features(mel1, m=hp.LFR_m, n=hp.LFR_n)
            mel1 = torch.unsqueeze(torch.from_numpy(mel1), dim=0)
            mel1 = mel1.to(hp.device)
            output = model(mel1)[0]
            feature1 = output.cpu().numpy()

            x0 = feature0 / np.linalg.norm(feature0)
            x1 = feature1 / np.linalg.norm(feature1)
            cosine = np.dot(x0, x1)
            theta = math.acos(cosine)
            theta = theta * 180 / math.pi
            is_same = tokens[2]
            angles.append('{} {}\n'.format(theta, is_same))

    elapsed_time = time.time() - start
    print('elapsed time(sec) per audio: {}'.format(elapsed_time / (6000 * 2)))

    with open(angles_file, 'w') as file:
        file.writelines(angles)
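The evaluation writes one "<angle> <is_same>" line per pair. As a follow-up sketch (not part of the original code), the angles file can be swept for the verification threshold that maximizes accuracy, assuming is_same is stored as 0/1:

import numpy as np

def best_threshold(angles_file):
    # Hypothetical helper: pick the angular threshold with highest accuracy.
    pairs = np.loadtxt(angles_file)          # columns: angle, is_same (0/1)
    angles, labels = pairs[:, 0], pairs[:, 1].astype(bool)
    best_acc, best_thr = 0.0, 0.0
    for thr in np.arange(0.0, 180.0, 0.5):
        acc = np.mean((angles < thr) == labels)
        if acc > best_acc:
            best_acc, best_thr = acc, thr
    return best_thr, best_acc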
Example #5
    def vis(self):

        self.model.eval()

        gallery_path = data.testset.imgs
        gallery_label = data.testset.ids

        # Extract feature
        print('extract features, this may take a few minutes')
        query_feature = extract_feature(
            model, tqdm([(torch.unsqueeze(data.query_image, 0), 1)]))
        gallery_feature = extract_feature(model, tqdm(data.test_loader))

        # sort images
        query_feature = query_feature.view(-1, 1)
        score = torch.mm(gallery_feature, query_feature)
        score = score.squeeze(1).cpu()
        score = score.numpy()

        index = np.argsort(score)  # from small to large
        index = index[::-1]  # from large to small

        # # Remove junk images
        # junk_index = np.argwhere(gallery_label == -1)
        # mask = np.in1d(index, junk_index, invert=True)
        # index = index[mask]

        # Visualize the rank result
        fig = plt.figure(figsize=(16, 4))

        ax = plt.subplot(1, 11, 1)
        ax.axis('off')
        plt.imshow(plt.imread(opt.query_image))
        ax.set_title('query')

        print('Top 10 images are as follows:')

        for i in range(10):
            img_path = gallery_path[index[i]]
            print(img_path)

            ax = plt.subplot(1, 11, i + 2)
            ax.axis('off')
            plt.imshow(plt.imread(img_path))
            ax.set_title(img_path.split('/')[-1][:9])

        fig.savefig("show.png")
        print('result saved to show.png')
Example #6
def choose_best_vec():
    train_sets_names = ["A", "B", "C", "D"]

    print("Choosing best vectorization method:")
    clf = LinearDiscriminantAnalysis()
    best_score = 0
    best_vec_method = ""

    for vec_method in train_sets_names:
        X_train, y_train, X_test, _ = training_data()
        X_train, X_test = extract_feature(X_train, X_test, vec_method)
        # Split Training set to predefined train and cross validation
        X_t, X_cv, y_t, y_cv, _ = split_data(X_train, y_train)

        model = clf.fit(X_t, y_t)
        y_pred = model.predict(X_cv)
        score = accuracy_score(y_cv, y_pred)

        print("Method:", vec_method, "cv accuracy:", score)

        if score > best_score:
            best_score = score
            best_vec_method = vec_method

    print("Best vectorization method:", best_vec_method, "with score of:",
          best_score)
    return best_vec_method
Example #7
def get_feature(img):
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Inverse binarization: make the black digits white and the white background black
    retval, binary = cv.threshold(gray, 0, 255,
                                  cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
    # Locate the RoI of each digit
    contours, hierarchy = cv.findContours(binary, cv.RETR_EXTERNAL,
                                          cv.CHAIN_APPROX_SIMPLE)
    rects = []
    for c in contours:
        rect = cv.boundingRect(c)
        # print(rect)
        rects.append(rect)
        cv.rectangle(img, rect, (0, 0, 255), 1, 8)

    # Sort by top-left x coordinate in ascending order; e.g. in td1.png the digits run left to right as 1-9 and then 0
    rects = sorted(rects, key=lambda x: x[0], reverse=False)
    feature_list = []
    for i, r in enumerate(rects):
        x, y, w, h = r
        roi = binary[y:y + h, x:x + w]
        feature = extract_feature(roi)
        feature_list.append(feature)
        # print(f"number {(i + 1) % 10} 's features: ", feature)
    return np.array(feature_list), np.array(rects)  # (n, 40), (n, 4)
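The extract_feature(roi) call above returns 40 values per digit (the final array is (n, 40)), but its body is not shown. One simple feature of that size, sketched purely as an assumption, is a 5x8 grid of foreground-pixel densities over the binarized ROI:

import cv2 as cv
import numpy as np

def extract_feature(roi, grid=(5, 8), cell=4):
    # Hypothetical sketch: resize the ROI to a fixed grid and use the
    # fraction of foreground pixels in each cell as one feature (5*8 = 40).
    resized = cv.resize(roi, (grid[1] * cell, grid[0] * cell))
    feats = []
    for r in range(grid[0]):
        for c in range(grid[1]):
            block = resized[r * cell:(r + 1) * cell, c * cell:(c + 1) * cell]
            feats.append(np.count_nonzero(block) / block.size)
    return np.array(feats, dtype=np.float32)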
Example #8
    def predict(self, audio_path):
        feature = extract_feature(audio_path, **self.audio_config).reshape(
            (1, 1, self.input_length))
        if self.classification:
            return self.int2emotions[self.model.predict_classes(feature)[0][0]]
        else:
            return self.model.predict(feature)[0][0][0]
Example #9
    def pre(self):

        json_file = 'results/result_' + time.strftime("%Y%m%d%H%M%S") + '.json'
        json_file_rk = 'results/result_' + time.strftime(
            "%Y%m%d%H%M%S") + '_rk.json'

        self.model.eval()
        t1 = time.time()
        print('extract features, this may take a few minutes')
        feats = extract_feature(self.model, tqdm(self.test_loader))
        print('feats:', feats.shape)

        qf = feats[:self.num_query]
        gf = feats[self.num_query:]
        print('qf,gf:', qf.shape, gf.shape)

        m, n = qf.shape[0], gf.shape[0]
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)

        distmat = distmat.cpu().numpy()
        self.writeResult(distmat, json_file)
        print('re_ranking ...')
        distmat_rk = reRanking(qf, gf, 7, 3, 0.85)
        self.writeResult(distmat_rk, json_file_rk)
        print('Time cost is: {:.2f} s'.format(time.time() - t1))
        print('over!')
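The pairwise matrix above is the standard expansion ||q - g||^2 = ||q||^2 + ||g||^2 - 2 q.g, accumulated in place with addmm_. A quick standalone check of that identity against torch.cdist (illustrative only):

import torch

q, g = torch.randn(4, 8), torch.randn(6, 8)
expanded = (q.pow(2).sum(1, keepdim=True)
            + g.pow(2).sum(1, keepdim=True).t()
            - 2 * q @ g.t())
# cdist returns Euclidean distances; squaring them matches the expansion
assert torch.allclose(expanded, torch.cdist(q, g).pow(2), atol=1e-5)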
Example #10
def gen_feat(opt, model_, data_loader, feat_path):
    opt.model_name = model_['model_name']
    opt.model_path = os.path.expanduser(model_['model_path'])
    opt.weight = os.path.expanduser(model_['weight'])
    logger.debug(opt)
    model = build_model(opt, 2432)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model = model.to("cuda")
    model.eval()

    with torch.no_grad():
        feat = extract_feature(model, tqdm(data_loader))
    print('feat:', feat.shape)

    # Save to Matlab for check
    feature = feat.numpy()
    result = {'test_f': feature}

    feat_dir, feat_file = os.path.split(feat_path)
    os.makedirs(feat_dir, exist_ok=True)
    scipy.io.savemat(feat_path, result)

    return feature
Example #11
    def __getitem__(self, i):
        sample = self.samples[i]
        wave = sample['wave']
        trn = sample['trn']

        feature = extract_feature(wave)
        return feature, trn
Example #12
def gen_feat(opt, model_, data):
    opt.model_name = model_['model_name']
    opt.model_path = os.path.expanduser(model_['model_path'])
    opt.weight = model_['weight']
    print(opt, '\n')
    if opt.model_name == 'se_resnext50':
        model = build_model(opt, 2432)
        # model.load_state_dict(torch.load(opt.weight))
        model.load_param(opt.weight)
    else:
        # model = build_model(opt, 4950)
        model = build_model(opt, data.num_classes)
        model.load_state_dict(torch.load(opt.weight)['state_dict'])
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model = model.to("cuda")
    model.eval()

    with torch.no_grad():
        feat = extract_feature(model, tqdm(data.test_loader))
    logger.debug('feat: {}'.format(feat.shape))

    # Save to Matlab for check
    feature = feat.numpy()
    result = {'test_f': feature}

    mat_dir, mat_file = os.path.split(model_['mat_path'])
    os.makedirs(mat_dir, exist_ok=True)
    scipy.io.savemat(model_['mat_path'], result)

    return feature
Example #13
def results():
    """
    This route is used to save the file, convert the audio to 16000hz monochannel,
    and predict the emotion using the saved binary model
    """
    if not os.path.isdir("./audio"):
        os.mkdir("audio")
    if request.method == 'POST':
        try:
            f = request.files['file']
            filename = secure_filename(f.filename)
            f.save(os.path.join(app.config["UPLOAD_FOLDER"], filename))
        except Exception:
            return render_template('main.html', value="")

    wav_file_pre = os.listdir("./audio")[0]
    wav_file_pre = f"{os.getcwd()}/audio/{wav_file_pre}"
    wav_file = convert(wav_file_pre)
    os.remove(wav_file_pre)
    model = pickle.load(open(f"{os.getcwd()}/model.model", "rb"))
    x_test = extract_feature(wav_file)
    y_pred = model.predict(np.array([x_test]))
    os.remove(wav_file)
    print(y_pred)
    return render_template('main.html', value=y_pred[0])
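convert above is assumed to resample the upload to 16 kHz mono, as the route's docstring says. A minimal sketch using pydub (the real helper may call ffmpeg or sox directly, and the output naming here is hypothetical):

import os
from pydub import AudioSegment

def convert(wav_file_pre):
    # Hypothetical sketch: resample to 16 kHz mono WAV and return the new path.
    audio = AudioSegment.from_file(wav_file_pre)
    audio = audio.set_frame_rate(16000).set_channels(1)
    wav_file = os.path.splitext(wav_file_pre)[0] + '_16k.wav'
    audio.export(wav_file, format='wav')
    return wav_file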
Example #14
    def test(self, use_cuda, use_fed=False):
        print("="*10)
        print("Start Testing!")
        print("="*10)
        print('We use the scale: %s' % self.multiple_scale)
        
        for dataset in self.data.datasets:
            # if self.use_clustering:
            if use_fed and not self.use_clustering:
                print("Using federated model")
                client_model = self.federated_model.eval()
                if use_cuda:
                    client_model = self.federated_model.cuda()
            else:
                print("Using local model")
                client_model = self.clients[dataset].get_model().eval()  # self.federated_model.eval()
                if use_cuda:
                    client_model = client_model.cuda()  # self.federated_model.cuda()
            # else:
            #     self.federated_model = self.federated_model.eval()
            #     if use_cuda:
            #         self.federated_model = self.federated_model.cuda()

            with torch.no_grad():
                gallery_feature = extract_feature(client_model, self.data.test_loaders[dataset]['gallery'], self.multiple_scale)
                query_feature = extract_feature(client_model, self.data.test_loaders[dataset]['query'], self.multiple_scale)

            result = {
                'gallery_f': gallery_feature.numpy(),
                'gallery_label': self.data.gallery_meta[dataset]['labels'],
                'gallery_cam': self.data.gallery_meta[dataset]['cameras'],
                'query_f': query_feature.numpy(),
                'query_label': self.data.query_meta[dataset]['labels'],
                'query_cam': self.data.query_meta[dataset]['cameras']
            }
            print("====== before loading =======")
            # for i in result:
            #     print(i, np.array(result[i]).shape, result[i][:3])
            # file_path = os.path.join(self.project_dir,
            #                          'model',
            #                          self.model_name,
            #                          'pytorch_result_{}_{}.mat'.format(dataset, random.randint(0, 100000000)))
            # scipy.io.savemat(file_path, result)

            print(self.model_name)
            print(dataset)
            testing_model(result, dataset)
Example #15
    def predict(self, audio_path):
        """
        given an `audio_path`, this method extracts the features
        and predicts the emotion
        """
        feature = extract_feature(audio_path,
                                  **self.audio_config).reshape(1, -1)
        return self.model.predict(feature)[0]
Example #16
    def __getitem__(self, i):
        sample = self.samples[i]
        wave = sample['wave']
        trn = sample['trn']

        feature = extract_feature(input_file=wave,
                                  feature='fbank',
                                  dim=80,
                                  cmvn=True,
                                  delta=True,
                                  delta_delta=True)

        return feature, trn
Example #17
    def predict_proba(self, audio_path):
        """Predicts the probability of each emotion."""
        feature = extract_feature(audio_path,
                                  **self.audio_config).reshape(1, -1)
        proba = self.model.predict_proba(feature)[0]
        result = {}
        for emotion, prob in zip(self.emotions, proba):
            result[emotion] = prob
        return result
Example #18
    def predict_proba(self, audio_path):
        if self.classification:
            feature = extract_feature(audio_path, **self.audio_config).reshape(
                (1, 1, self.input_length))
            proba = self.model.predict(feature)[0][0]
            result = {}
            for prob, emotion in zip(proba, self.emotions):
                result[emotion] = prob
            return result
        else:
            raise NotImplementedError(
                "Probability prediction doesn't make sense for regression")
Example #19
    def evaluate(self):

        self.model.eval()

        print('extract features, this may take a few minutes')
        qf = extract_feature(self.model, tqdm(self.query_loader)).numpy()
        gf = extract_feature(self.model, tqdm(self.test_loader)).numpy()
        # mqf = extract_feature(self.model, tqdm(self.query_loader)).numpy()
        def rank(dist):
            r = cmc(dist, self.queryset.ids, self.testset.ids, self.queryset.cameras, self.testset.cameras,
                    separate_camera_set=False,
                    single_gallery_shot=False,
                    first_match_break=True)
            m_ap = mean_ap(dist, self.queryset.ids, self.testset.ids, self.queryset.cameras, self.testset.cameras)

            return r, m_ap
        #
        # ########################   re rank##########################
        # q_g_dist = np.dot(qf, np.transpose(gf))
        # q_q_dist = np.dot(qf, np.transpose(qf))
        # g_g_dist = np.dot(gf, np.transpose(gf))
        # dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)
        #
        # r, m_ap = rank(dist)
        #
        # print('[With Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
        #       .format(m_ap, r[0], r[2], r[4], r[9]))
        #
        #
        # #########################no re rank##########################
        dist = cdist(qf, gf)

        r, m_ap = rank(dist)

        index = np.argsort(dist, axis=1)  # from small to large
        index = index[:, :100] + 1

        file = opt.weight
        file = file.split('/model')[0]
        np.savetxt(os.path.join('weights', file, 'rank.txt'), index, delimiter=' ', fmt='%d')

        print('[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
              .format(m_ap, r[0], r[2], r[4], r[9]))
Example #20
    def evaluate(self):

        self.model.eval()

        print('extract features, this may take a few minutes')
        qf = extract_feature(self.model, tqdm(self.query_loader)).numpy()
        gf = extract_feature(self.model, tqdm(self.test_loader)).numpy()

        def rank(dist):
            r = cmc(dist,
                    self.queryset.ids,
                    self.testset.ids,
                    self.queryset.cameras,
                    self.testset.cameras,
                    separate_camera_set=False,
                    single_gallery_shot=False,
                    first_match_break=True)
            m_ap = mean_ap(dist, self.queryset.ids, self.testset.ids,
                           self.queryset.cameras, self.testset.cameras)

            return r, m_ap

        #########################   re rank##########################
        q_g_dist = np.dot(qf, np.transpose(gf))
        q_q_dist = np.dot(qf, np.transpose(qf))
        g_g_dist = np.dot(gf, np.transpose(gf))
        dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)

        r, m_ap = rank(dist)

        print(
            '[With    Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
            .format(m_ap, r[0], r[2], r[4], r[9]))

        #########################no re rank##########################
        dist = cdist(qf, gf)

        r, m_ap = rank(dist)

        print(
            '[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
            .format(m_ap, r[0], r[2], r[4], r[9]))
Example #21
    def __getitem__(self, i):
        sample = self.samples[i]
        wave = sample['audiopath']
        label = sample['label']

        feature = extract_feature(input_file=wave,
                                  feature='fbank',
                                  dim=hp.n_mels,
                                  cmvn=True)
        feature = build_LFR_features(feature, m=hp.LFR_m, n=hp.LFR_n)

        return feature, label
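build_LFR_features(inputs, m, n) implements low frame rate processing: each output frame stacks m consecutive fbank frames, and the window then advances by n frames. Below is a sketch consistent with the common Speech-Transformer implementation; padding the tail by repeating the last frame is an assumption:

import numpy as np

def build_LFR_features(inputs, m, n):
    # Stack m consecutive frames, then hop n frames (low frame rate).
    T = inputs.shape[0]
    T_lfr = int(np.ceil(T / n))
    lfr = []
    for i in range(T_lfr):
        if m <= T - i * n:
            frame = inputs[i * n:i * n + m].reshape(-1)
        else:
            # pad the final window by repeating the last frame
            pad = m - (T - i * n)
            frame = np.concatenate([inputs[i * n:].reshape(-1),
                                    np.tile(inputs[-1], pad)])
        lfr.append(frame)
    return np.vstack(lfr)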
Example #22
    def test(self, use_cuda):
        print("=" * 10)
        print("Start Testing!")
        print("=" * 10)
        print('We use the scale: %s' % self.multiple_scale)

        for dataset in self.data.datasets:
            self.federated_model = self.federated_model.eval()
            if use_cuda:
                self.federated_model = self.federated_model.cuda()

            with torch.no_grad():
                gallery_feature = extract_feature(
                    self.federated_model,
                    self.data.test_loaders[dataset]['gallery'],
                    self.multiple_scale)
                query_feature = extract_feature(
                    self.federated_model,
                    self.data.test_loaders[dataset]['query'],
                    self.multiple_scale)

            result = {
                'gallery_f': gallery_feature.numpy(),
                'gallery_label': self.data.gallery_meta[dataset]['labels'],
                'gallery_cam': self.data.gallery_meta[dataset]['cameras'],
                'query_f': query_feature.numpy(),
                'query_label': self.data.query_meta[dataset]['labels'],
                'query_cam': self.data.query_meta[dataset]['cameras']
            }

            scipy.io.savemat(
                os.path.join(self.project_dir, 'model', self.model_name,
                             'pytorch_result.mat'), result)

            print(self.model_name)
            print(dataset)

            os.system('python evaluate.py --result_dir {} --dataset {}'.format(
                os.path.join(self.project_dir, 'model', self.model_name),
                dataset))
Example #23
    def pre(self):

        json_file = 'results/result_' + time.strftime("%Y%m%d%H%M%S") + '.json'
        json_file_rk = 'results/result_' + time.strftime(
            "%Y%m%d%H%M%S") + '_rk.json'

        self.model.eval()
        t1 = time.time()
        print('extract features, this may take a few minutes')
        feats = extract_feature(self.model, tqdm(self.test_loader))
        print('feats:', feats.shape)

        qf = feats[:self.num_query]
        gf = feats[self.num_query:]
        print('qf,gf:', qf.shape, gf.shape)

        m, n = qf.shape[0], gf.shape[0]
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)

        distmat = distmat.cpu().numpy()
        self.writeResult(distmat, json_file)
        print('re_ranking ...')
        distmat_rk = reRanking(qf, gf, 7, 3, 0.85)
        self.writeResult(distmat_rk, json_file_rk)

        # print('distmat:', distmat.shape)
        # index = np.argsort(distmat)  # from small to large
        # max_index = index[:, :200]
        # print(max_index.shape)
        #
        # # Visualize the rank result
        # results = {}
        # json_file = 'results/result_' + time.strftime("%Y%m%d%H%M%S") + '.json'
        # json_file_rk = 'results/result_' + time.strftime("%Y%m%d%H%M%S") + '_rk.json'
        # for i in range(len(self.query_paths)):
        #     query_name = self.query_paths[i].split('/')[-1]
        #     index_mask = max_index[i]
        #     gallery_name = [self.gallery_paths[k].split('/')[-1] for k in index_mask]
        #     #print(index_mask)
        #
        #     results[query_name] = gallery_name
        #     #print(res)
        #
        # with open(json_file, 'w', encoding='utf-8') as fp:
        #     json.dump(results, fp)

        print('Time cost is: {:.2f} s'.format(time.time() - t1))
        print('over!')
Example #24
    def __getitem__(self, i):
        sample = self.samples[i]
        wave = sample['wave']
        trn = sample['trn']

        feature = extract_feature(input_file=wave,
                                  feature='fbank',
                                  dim=self.args.d_input,
                                  cmvn=True)
        feature = build_LFR_features(feature,
                                     m=self.args.LFR_m,
                                     n=self.args.LFR_n)

        return feature, trn
Example #25
def sub_extract(args):
    iscount = args.count
    k, gap, lam, n_jobs = args.kmer, args.gap, args.lam, args.process
    if args.directory:
        out = Path(args.output[0])
        out.mkdir(exist_ok=True)
        ul.batch_extract(args.file,
                         out,
                         k,
                         gap,
                         lam,
                         n_jobs=n_jobs,
                         count=iscount)
    else:
        if args.raa:
            raa = list(args.raa)
        else:
            raa = list(Path(args.file[0]).stem.split('-')[-1])
        xy_ls = []
        aa_ls = [''.join(aa) for aa in product(raa, repeat=k)]
        for idx, file in enumerate(args.file):
            feature_file = Path(file)
            xy = ul.extract_feature(feature_file,
                                    raa,
                                    k,
                                    gap,
                                    lam,
                                    count=iscount)
            new_aa_ls = aa_ls
            if args.index:
                _, (fea_idx, _) = ul.load_data(args.index,
                                               normal=False,
                                               label_exist=False)
                fea_idx = fea_idx.astype(int).flatten()
                xy = xy[:, fea_idx]
                new_aa_ls = [aa_ls[i] for i in fea_idx]
            if args.label_f:
                y = np.array([[idx]] * xy.shape[0])
                xy = np.hstack([y, xy])
            xy_ls.append(xy)
        if args.label_f:
            new_aa_ls.insert(0, 'label')
        header = ','.join(new_aa_ls)
        if args.merge:
            out = args.output[0]
            seq_mtx = np.vstack(xy_ls)
            ul.write_array(Path(out), seq_mtx, header=header)
            exit()
        for idx, o in enumerate(args.output):
            ul.write_array(Path(o), xy_ls[idx], header=header)
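ul.extract_feature builds one column per k-mer over the reduced alphabet (the header above is exactly product(raa, repeat=k)). A sketch of the plain k-mer frequency part follows; the gapped and lambda-correlation variants are omitted since they are not reconstructable from this snippet:

from itertools import product

def kmer_frequencies(seq, raa, k):
    # Hypothetical sketch: normalized counts of each length-k word.
    vocab = [''.join(aa) for aa in product(raa, repeat=k)]
    index = {word: i for i, word in enumerate(vocab)}
    counts = [0] * len(vocab)
    for i in range(len(seq) - k + 1):
        word = seq[i:i + k]
        if word in index:
            counts[index[word]] += 1
    total = max(sum(counts), 1)
    return [c / total for c in counts]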
Example #26
def predictSpeech():
    print("Please talk")
    filename = "test.wav"
    # record the file (start talking)
    record_to_file(filename)
    
    # load the saved model (after training)
    model = pickle.load(open("result/mlp_classifier.model", "rb"))
    # extract features and reshape it
    features = extract_feature(file_name="test.wav", mfcc=True, chroma=True, mel=True).reshape(1, -1)
    # predict
    result = model.predict(features)[0]
    # show the result !
    print("result:", result)
    return result
Example #27
    def predict_proba(self, audio_path):
        """
        Predicts the probability of each emotion.
        """
        if self.classification:
            feature = extract_feature(audio_path,
                                      **self.audio_config).reshape(1, -1)
            proba = self.model.predict_proba(feature)[0]
            result = {}
            for emotion, prob in zip(self.model.classes_, proba):
                result[emotion] = prob
            return result
        else:
            raise NotImplementedError(
                "Probability prediction doesn't make sense for regression")
Example #28
    def __getitem__(self, i):
        sample = self.samples[i]
        wave = sample['wave']
        trn = sample['trn']

        feature = extract_feature(input_file=wave,
                                  feature='fbank',
                                  dim=self.args.d_input,
                                  cmvn=True)
        # zero mean and unit variance
        feature = (feature - feature.mean()) / feature.std()
        feature = spec_augment(feature)
        feature = build_LFR_features(feature,
                                     m=self.args.LFR_m,
                                     n=self.args.LFR_n)

        return feature, trn
Example #29
    return sample_width, r


def record_to_file(path):
    "Records from the microphone and outputs the resulting data to 'path'"
    sample_width, data = record()
    data = pack('<' + ('h' * len(data)), *data)

    wf = wave.open(path, 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(sample_width)
    wf.setframerate(RATE)
    wf.writeframes(data)
    wf.close()


if __name__ == "__main__":
    # load the saved model (after training)
    model = pickle.load(open("result/mlp_classifier.model", "rb"))
    print("Please talk")
    filename = "test.wav"
    # record the file (start talking)
    record_to_file(filename)
    # extract features and reshape it
    features = extract_feature(filename, mfcc=True, chroma=True,
                               mel=True).reshape(1, -1)
    # predict
    result = model.predict(features)[0]
    # show the result !
    print("result:", result)
Example #30
    def compute_true_false_miss(self, conf, log_dir, feat_path, tta):
        def gen_distmat(qf, q_pids, gf, g_pids):
            m, n = qf.shape[0], gf.shape[0]
            logger.debug('query shape {}, gallery shape {}'.format(qf.shape, gf.shape))
            # logger.debug('q_pids {}, g_pids {}'.format(q_pids, g_pids))
            distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                      torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
            distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
            distmat = distmat.cpu().numpy()
            return distmat

        def distance(emb1, emb2):
            diff = np.subtract(emb1, emb2)
            dist = np.sum(np.square(diff), 1)
            return dist

        self.model.eval()
        if conf.gen_feature:
            with torch.no_grad():
                query_feature, query_label = extract_feature(conf, self.model, self.loader['query']['dl'], tta)
                gallery_feature, gallery_label = extract_feature(conf, self.model, self.loader['gallery']['dl'], tta)

            result = {'query_feature': query_feature.numpy(), 'query_label': query_label.numpy(),
                'gallery_feature': gallery_feature.numpy(), 'gallery_label': gallery_label.numpy()}
            scipy.io.savemat(feat_path, result)

        else:
            result = scipy.io.loadmat(feat_path)
            query_feature = torch.from_numpy(result['query_feature'])
            query_label = torch.from_numpy(result['query_label'])[0]
            gallery_feature = torch.from_numpy(result['gallery_feature'])
            gallery_label = torch.from_numpy(result['gallery_label'])[0]

        distmat = gen_distmat(query_feature, query_label, gallery_feature, gallery_label)

        # record txt
        with open(os.path.join(log_dir, 'result.txt'), 'at') as f:
            f.write('%s\t%s\t%s\t%s\n' % ('threshold', 'acc', 'err', 'miss'))

        # record excel
        xls_file = xlwt.Workbook()
        sheet_1 = xls_file.add_sheet('sheet_1', cell_overwrite_ok=True)
        row = 0
        path_excel = os.path.join(log_dir, 'result.xls')

        sheet_title = ['threshold', 'acc', 'err', 'miss']
        for i_sheet in range(len(sheet_title)):
            sheet_1.write(row, i_sheet, sheet_title[i_sheet])
        xls_file.save(path_excel)
        row += 1


        index = np.argsort(distmat)  # from small to large
        max_index = index[:, 0]

        query_list_file = 'data/probe.txt'
        gallery_list_file = 'data/gallery.txt'
        err_rank1 = os.path.join(log_dir, 'err_rank1.txt')
        data_path = DataPath(query_list_file, gallery_list_file)
        with open(err_rank1, 'at') as f:
            f.write('%s\t\t\t%s\n' % ('query', 'gallery'))

        thresholds = np.arange(0.4, 2, 0.01)
        for threshold in thresholds:
            acc, err, miss = compute_rank1(distmat, max_index, query_label, gallery_label, threshold, data_path, err_rank1)
            # record txt
            with open(os.path.join(log_dir, 'result.txt'), 'at') as f:
                f.write('%.6f\t%.6f\t%.6f\t%.6f\n' % (threshold, acc, err, miss))

            # record excel
            list_data = [threshold, acc, err, miss]
            for i_1 in range(len(list_data)):
                sheet_1.write(row, i_1, list_data[i_1])
            xls_file.save(path_excel)
            row += 1
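compute_rank1 is not shown; from how it is called, it evaluates open-set rank-1 identification at a distance threshold. A hypothetical sketch of the three rates it appears to return (the real helper also logs mismatched pairs to err_rank1):

import numpy as np

def rank1_at_threshold(distmat, max_index, query_label, gallery_label, threshold):
    # Hypothetical sketch: accept the nearest gallery match only when the
    # distance is below the threshold; report accept-correct (acc),
    # accept-wrong (err) and reject-true-match (miss) rates.
    n = distmat.shape[0]
    acc = err = miss = 0
    for i in range(n):
        j = max_index[i]
        if distmat[i, j] < threshold:
            if gallery_label[j] == query_label[i]:
                acc += 1
            else:
                err += 1
        else:
            miss += 1
    return acc / n, err / n, miss / n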