Example #1
def clear():
    # tear down existing model/lab state, then rebuild a fresh test fixture
    Model.destroy_model()
    Laboratory.destroy_laboratory()

    Model.create_model('test-model')
    Laboratory.create_laboratory('test-lab')

    destroy_mapping()
    create_mapping('test-map-model-to-lab')
Example #2
    def __init__(self):
        # load the model
        self.model = Model()
        self.model.model_load("./inference_models/ecgnet_0_fold_0.6078759902401878.pt")

        # load preprocessing pipeline

        # load decision thresholds
        self.postprocessing = PostProcessing(0)
Example #3
def test_no_duplicated_mapping():
    clear()

    m = Model.add_host('alice')
    n = Laboratory.add_device('node1')
    bind(m, n)
    bind(m, n)
    bind(n, m)
    assert len(get_mapping().bindings()) == 1
Example #4
def test_binding():
    clear()

    m = Model.add_host('alice')
    n = Laboratory.add_device('node1')
    bind(m, n)

    assert m.bound() == n
    assert n.bound() == m
    assert len(get_mapping().bindings()) == 1
Example #5
def load_12ECG_model(model_input):
    """Load Physionet2017 Model
    model_input: This is an argument from running driver.py on command line. I think we just ignore it and hard-code
    out model path.
    """

    models_list = [
        'ecgnet_0_fold_0.631593191670484', 'ecgnet_1_fold_0.6370736239012214',
        'ecgnet_2_fold_0.6444454717434089', 'ecgnet_3_fold_0.6195938932528102',
        'ecgnet_4_fold_0.6149398148500164', 'ecgnet_5_fold_0.6409127451470004'
    ]

    os.makedirs(model_input + '/pretrained/', exist_ok=True)

    # load each fold's weights and re-save a pretrained copy
    models = []
    for i in models_list:
        model_stack = Model(input_size=19000,
                            n_channels=15,
                            hparams=hparams,
                            gpu=[],
                            inference=True)
        model_stack.model_load("./inference_models/" + i + ".pt")
        model_stack.model_save(model_input + '/pretrained/' + i + ".pt")
        models.append(model_stack)

    return models
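For orientation, here is a minimal sketch of how this loader might be driven, assuming the PhysioNet-style convention where driver.py receives the model directory on the command line; the entry-point stub below is an illustration, not part of the snippet:

import sys

# hypothetical driver stub: the real driver.py passes this directory through
if __name__ == '__main__':
    model_input = sys.argv[1]  # e.g. 'saved_models'
    models = load_12ECG_model(model_input)
    print('Loaded an ensemble of', len(models), 'fold models')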
Example #6
    def __init__(self) -> None:
        RefreshProgramLogs()
        super().__init__()
        self.video = cv2.VideoCapture(self.source)
        self.model = Model(utilsdir=c.UTILSDIR,
                           modeldir=c.MODELDIR,
                           weights=c.WEIGHTS,
                           cfg=c.CFG,
                           labelsdir=c.LABELSDIR,
                           coco=c.COCONAMES,
                           cuda=False)
        self.active_thread_count: Optional[int] = None  # set once workers spawn
        self.p_time: float = 0.0
        self.frame_counter: int = 0
Example #7
def test_mapping_by_name():
    clear()

    m = Model.add_host('alice')
    n = Laboratory.add_device('node1')

    mi = m.add_interface('eth')
    ni = n.add_interface('eth1')

    bind('alice', 'node1')
    bind('alice.eth', 'node1.eth1')

    assert m.bound() == n
    assert n.bound() == m

    assert mi.bound() == ni
    assert ni.bound() == mi
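Examples #3, #4, and #7 all exercise the same binding API. As a quick recap outside a test, a minimal sketch that reuses the clear() fixture from Example #1 (all names come from the tests above):

clear()

host = Model.add_host('alice')
device = Laboratory.add_device('node1')
host_if = host.add_interface('eth')
device_if = device.add_interface('eth1')

# objects and dotted names are interchangeable when binding
bind(host, device)
bind('alice.eth', 'node1.eth1')

print(host.bound() == device)        # True
print(host_if.bound() == device_if)  # True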
Example #8
class Predict:

    def __init__(self):
        # load the model
        self.model = Model()
        self.model.model_load("./inference_models/ecgnet_0_fold_0.6078759902401878.pt")

        # load preprocessing pipeline

        # load decision thresholds
        self.postprocessing = PostProcessing(0)

        # threshold = 0
        # for fold in range(6):
        #     threshold += float(open(f"threshold_{fold}.txt", "r").read()) / 6
        #
        # self.postprocessing.threshold = threshold


    def predict(self, signal, meta):

        ############## Preprocessing ##############
        # downsample each of the 12 leads by a factor of 2
        X_resampled = np.zeros((signal.shape[0] // 2, 12))
        for i in range(12):
            X_resampled[:, i] = self.resampling.downsample(signal[:, i], order=2)

        # scale amplitudes using the rpeak metadata
        X_resampled = self.apply_amplitude_scaling(X=X_resampled, y=meta)

        # pad or truncate to the fixed model input length
        sig_length = 19000

        if X_resampled.shape[0] < sig_length:
            padding = np.zeros((sig_length - X_resampled.shape[0], X_resampled.shape[1]))
            X_resampled = np.concatenate([X_resampled, padding], axis=0)
        elif X_resampled.shape[0] > sig_length:
            X_resampled = X_resampled[:sig_length, :]

        ############## Predictions ##############
        predict = self.model.predict(X_resampled)

        ############## Postprocessing ##############

        predict = self.postprocessing.run(predict)
        predict = list(predict)

        # linked label pairs: if either class fires, mark both positive
        if predict[4] > 0 or predict[18] > 0:
            predict[4] = 1
            predict[18] = 1
        if predict[23] > 0 or predict[12] > 0:
            predict[23] = 1
            predict[12] = 1
        if predict[26] > 0 or predict[13] > 0:
            predict[26] = 1
            predict[13] = 1

        return predict

    @staticmethod
    def apply_amplitude_scaling(X, y):
        """Get rpeaks for each channel and scale waveform amplitude by median rpeak amplitude of lead I."""
        if y['rpeaks']:
            for channel_rpeaks in y['rpeaks']:
                if channel_rpeaks:
                    return X / np.median(X[y['rpeaks'][0], 0])
        return (X - X.mean()) / X.std()
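A quick sketch of driving the class above. The input shape and the 'rpeaks' metadata layout are illustrative assumptions, and predict() relies on a resampling pipeline that the placeholder comment in __init__ still has to provide:

import numpy as np

predictor = Predict()
# the "load preprocessing pipeline" step must assign predictor.resampling
# before predict() can run

# fabricated 12-lead recording plus per-channel rpeak sample indices
signal = np.random.randn(38000, 12).astype(np.float32)
meta = {'rpeaks': [[400, 1400, 2400]] + [[] for _ in range(11)]}

labels = predictor.predict(signal, meta)
print('positive classes:', [i for i, v in enumerate(labels) if v > 0])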
Example #9
    def train(self, positive_dir, negative_dir, hnm_dir):
        model = Model.svc()

        batch_size = 6000
        increment = batch_size // 100

        dir_walk_mgr = RecursiveDirectoryWalkerManager()

        # Get positive samples
        print('Loading positive samples...')
        i = batch_size
        samples = None
        p_len = 0
        while i:
            if i % increment == 0:
                sys.stdout.write('.')
                sys.stdout.flush()
            f = dir_walk_mgr.get_a_file(directory=positive_dir,
                                        filters=['.jpg'])
            if f is None:
                print('Not enough positive samples T_T')
                break
            img = cv2.imread(os.path.normpath(f.path),
                             1)  # Load as RGB for compatibility
            if img is None:
                continue

            gray = ImageUtilities.preprocess(img,
                                             convert_gray=cv2.COLOR_RGB2YCrCb)
            gray = imresize(gray, HyperParam.window_size)

            for j in range(3):
                intensity = 0.3 * j

                img = ImageUtilities.transform(gray, intensity=intensity)
                if p_len < 100:
                    cv2.imwrite('./preview/' + str(p_len).zfill(2) + '.jpg',
                                img)
                hist = self.compute_hog(img)
                if samples is None:
                    samples = np.zeros((batch_size, ) + hist.shape,
                                       dtype=np.float32)
                samples[p_len, :] = hist
                p_len += 1
                i -= 1
                if p_len >= len(samples): break

        print(samples.shape)
        positive_samples = np.copy(samples[0:p_len, :])
        print('Positive samples loaded:', positive_samples.shape)

        # Get negative samples
        print('Loading negative samples...')
        samples = None
        n_len = p_len * 10
        i = n_len
        pt = 0
        while i:
            if i % increment == 0:
                sys.stdout.write('.')
                sys.stdout.flush()
            f = dir_walk_mgr.get_a_file(directory=negative_dir,
                                        filters=['.jpg'])
            if f is None:
                print('Not enough negative samples T_T')
                break
            img = cv2.imread(os.path.normpath(f.path),
                             1)  # Load as RGB for compatibility
            if img is None:
                continue

            gray = ImageUtilities.preprocess(img,
                                             convert_gray=cv2.COLOR_RGB2YCrCb)
            gray = imresize(gray, HyperParam.window_size)
            hist = self.compute_hog(gray)
            if samples is None:
                samples = np.zeros((n_len, ) + hist.shape, dtype=np.float32)
            try:
                samples[pt, :] = hist.ravel()
            except (ValueError, IndexError):
                # skip feature vectors that do not fit the preallocated buffer
                pass
            pt += 1
            i -= 1

        print('Negative samples loaded:', samples.shape)
        samples = np.concatenate([positive_samples, samples])

        # Get hard-negative-mining samples
        for di in range(10):
            directory = os.path.normpath(
                os.path.join(hnm_dir,
                             str(di + 1).zfill(4)))
            if not os.path.isdir(directory):
                break
            print('Loading hard-negative-mining samples...', directory)

            hnm_samples = None
            t_len = batch_size * 10
            i = t_len
            print('target sample size', i)
            pt = 0
            while i:
                if i % increment == 0:
                    sys.stdout.write('.')
                    sys.stdout.flush()

                f = dir_walk_mgr.get_a_file(directory=directory,
                                            filters=['.jpg'])
                if f is None:
                    print('Not enough hard-negative-mining samples T_T')
                    break

                img = cv2.imread(os.path.normpath(f.path),
                                 1)  # Load as RGB for compatibility
                if img is None:
                    continue

                gray = ImageUtilities.preprocess(
                    img, convert_gray=cv2.COLOR_RGB2YCrCb)
                gray = imresize(gray, HyperParam.window_size)
                hist = self.compute_hog(gray)
                if hnm_samples is None:
                    hnm_samples = np.zeros((t_len, ) + hist.shape,
                                           dtype=np.float32)
                try:
                    hnm_samples[pt, :] = hist.ravel()
                except (ValueError, IndexError):
                    # skip feature vectors that do not fit the preallocated buffer
                    pass
                pt += 1
                i -= 1

            hnm_samples = np.copy(hnm_samples[0:pt, :])
            print('HNM samples loaded:', hnm_samples.shape)
            samples = np.concatenate([samples, hnm_samples])

        print('Total samples:', samples.shape)

        # Convert to numpy array of float32 and create labels
        labels = np.zeros((samples.shape[0], ), dtype=np.int32)
        labels[0:p_len] = 1

        # Shuffle Samples
        rand = np.random.RandomState(321)
        shuffle = rand.permutation(len(samples))
        samples = samples[shuffle]
        labels = labels[shuffle]

        print(samples.shape)
        print(labels.shape)
        print('Training...')

        # Create SVM classifier
        model.fit(samples, labels)
        print(model.best_score_)
        with open('svm.dat', 'wb') as f:
            pickle.dump(model, f)
        # td = cv2.TrainData.create(InputArray samples, int layout, InputArray responses,
        #                           InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),
        #                           InputArray sampleWeights=noArray(), InputArray varType=noArray())
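Because the fitted classifier is pickled to svm.dat, inference is symmetric with training; a minimal reload sketch, where hist stands in for a feature vector produced by the same preprocess/compute_hog pipeline used above:

import pickle

with open('svm.dat', 'rb') as f:
    model = pickle.load(f)

# hist: a HOG feature vector built exactly as during training
# prediction = model.predict(hist.reshape(1, -1))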
Example #10
def draw_feature(path='train_log/models/best-accuracy',
                 scale=1,
                 data_set='fashion_mnist'):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    checkpoint = torch.load(path, map_location=device)
    data_loader_train, data_loader_test, data_train, data_test = load_dataset(
        data_set=data_set)

    net = Model(n_feature)
    net.load_state_dict(checkpoint['model_state_dict'])
    exact_list = ['feature']
    feature_extractor = FeatureExtractor(net, exact_list)
    feature_extractor.to(device)

    # get weight
    weight = checkpoint['model_state_dict']['pred.weight'].to('cpu').data
    weight_norm = weight / torch.norm(weight, dim=1, keepdim=True)
    print("weight_norm: ", torch.norm(weight, dim=1))

    # get feature
    features = []
    labels = []
    for data in data_loader_train:
        X_train, y_train = data
        X_train = X_train.to(device)
        outputs = feature_extractor(X_train)['feature'].data
        features.append(outputs)
        labels.append(y_train)
    features = torch.cat(features, dim=0).to('cpu').data
    features_norm = features / torch.norm(features, dim=1, keepdim=True)
    features = features.numpy()
    features_norm = features_norm.numpy()
    labels = torch.cat(labels, dim=0).to('cpu').data.numpy()

    # draw features
    label_list = get_label_list(data_set)

    plt.figure(1, figsize=(20, 20))
    plt.subplot(221)
    for i in range(10):
        plt.plot([0, scale * weight[i, 0]], [0, scale * weight[i, 1]],
                 color=color_list[i])
        feature = features[labels == i]
        plt.scatter(feature[:, 0],
                    feature[:, 1],
                    c=color_list[i],
                    marker='.',
                    label=label_list[i],
                    s=1)
        plt.legend()

    plt.subplot(223)
    for i in range(10):
        plt.plot([0, weight_norm[i, 0]], [0, weight_norm[i, 1]],
                 color=color_list[i])
        feature = features_norm[labels == i]
        plt.scatter(feature[:, 0],
                    feature[:, 1],
                    c=color_list[i],
                    marker='.',
                    label=label_list[i],
                    s=1)
        plt.legend()

    # get feature
    features = []
    labels = []
    for data in data_loader_test:
        X_test, y_test = data
        X_test = X_test.to(device)
        outputs = feature_extractor(X_test)['feature'].data
        features.append(outputs)
        labels.append(y_test)
    features = torch.cat(features, dim=0).to('cpu').data
    features_norm = features / torch.norm(features, dim=1, keepdim=True)
    features = features.numpy()
    features_norm = features_norm.numpy()
    labels = torch.cat(labels, dim=0).to('cpu').data.numpy()

    plt.subplot(222)
    for i in range(10):
        plt.plot([0, scale * weight[i, 0]], [0, scale * weight[i, 1]],
                 color=color_list[i])
        feature = features[labels == i]
        plt.scatter(feature[:, 0],
                    feature[:, 1],
                    c=color_list[i],
                    marker='.',
                    label=label_list[i],
                    s=1)
        plt.legend()

    plt.subplot(224)
    for i in range(10):
        plt.plot([0, weight_norm[i, 0]], [0, weight_norm[i, 1]],
                 color=color_list[i])
        feature = features_norm[labels == i]
        plt.scatter(feature[:, 0],
                    feature[:, 1],
                    c=color_list[i],
                    marker='.',
                    label=label_list[i],
                    s=1)
        plt.legend()

    title = os.path.basename(os.getcwd()) + '-' + os.path.basename(path)
    plt.suptitle(title)

    fname = 'feature-{}'.format(os.path.basename(path))
    figname = 'train_log/{}.png'.format(fname)

    if os.path.exists(figname):
        os.remove(figname)
    plt.savefig(figname)
    plt.close('all')
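Calling the helper mirrors its defaults; a short sketch, where the checkpoint path assumes a model saved by the corresponding training run and scale is only a readability knob for the weight vectors:

# assumes train_log/models/best-accuracy was written by the training loop
draw_feature(path='train_log/models/best-accuracy',
             scale=10,
             data_set='fashion_mnist')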