Example no. 1
def decode_segmap(label_mask, dataset, plot=False):
    """Map a 2-D mask of class indices to an RGB image using the dataset's colour palette."""
    if dataset == 'pascal' or dataset == 'coco':
        n_classes = 21
        label_colours = get_pascal_labels()
    elif dataset == 'cityscapes':
        n_classes = 19
        label_colours = get_cityscapes_labels()
    else:
        raise NotImplementedError

    r = label_mask.copy()
    g = label_mask.copy()
    b = label_mask.copy()
    for ll in range(0, n_classes):
        r[label_mask == ll] = label_colours[ll, 0]
        g[label_mask == ll] = label_colours[ll, 1]
        b[label_mask == ll] = label_colours[ll, 2]
    rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
    rgb[:, :, 0] = r / 255.0
    rgb[:, :, 1] = g / 255.0
    rgb[:, :, 2] = b / 255.0
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return (rgb * 255).astype(np.uint8)
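A minimal usage sketch for the function above, assuming the colour-map helpers (get_pascal_labels() / get_cityscapes_labels()) return an (n_classes, 3) integer array as in the original repository; the mask values here are illustrative:

import numpy as np

label_mask = np.random.randint(0, 21, size=(64, 64))      # 2-D mask of class indices
rgb = decode_segmap(label_mask, dataset='pascal')          # returns a (64, 64, 3) uint8 image
decode_segmap(label_mask, dataset='pascal', plot=True)     # or display it directly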
Example no. 2
def plot_loss_acc(hist):
    loss = hist.history['loss']
    acc = hist.history['acc']
    val_loss = hist.history['val_loss']
    val_acc = hist.history['val_acc']

    # make a figure
    fig = plt.figure(figsize=(8, 4))
    # subplot loss
    ax1 = fig.add_subplot(121)
    ax1.plot(loss, label='train_loss')
    ax1.plot(val_loss, label='val_loss')
    ax1.set_xlabel('Epochs')
    ax1.set_ylabel('Loss')
    ax1.set_title('Loss on Training and Validation Data')
    ax1.legend()
    # subplot acc
    ax2 = fig.add_subplot(122)
    ax2.plot(acc, label='train_acc')
    ax2.plot(val_acc, label='val_acc')
    ax2.set_xlabel('Epochs')
    ax2.set_ylabel('Accuracy')
    ax2.set_title('Accuracy on Training and Validation Data')
    ax2.legend()
    plt.tight_layout()
    plt.show()
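A hedged usage sketch: plot_loss_acc expects the History object returned by Keras model.fit and reads the 'acc' / 'val_acc' keys (recent Keras versions record 'accuracy' / 'val_accuracy' unless the metric is passed as the string 'acc'); the data and model below are purely illustrative:

import numpy as np
from tensorflow import keras

x = np.random.rand(200, 10)
y = (x.sum(axis=1) > 5).astype(int)
model = keras.Sequential([
    keras.layers.Dense(8, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
hist = model.fit(x, y, validation_split=0.2, epochs=5, verbose=0)
plot_loss_acc(hist)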
Example no. 3
def plot_lines(data):
    fig = plt.figure()
    ax = plt.axes()

    x = np.linspace(0, data.shape[1] - 1, data.shape[1])  # one x position per column
    for ii in range(data.shape[0]):
        ax.plot(x, data[ii])
    plt.show()
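A brief usage sketch, assuming data is a 2-D NumPy array in which each row is one series to draw:

import numpy as np

data = np.sin(np.linspace(0, 2 * np.pi, 100)) * np.arange(1, 4)[:, None]  # three scaled sine waves
plot_lines(data)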
Example no. 4
    def show_train_val_metric_curve(self, metric="loss", figsize=(10, 8)):
        fig, ax = plt.subplots(figsize=figsize)

        train_pd = self.get_records_dataframe()
        val_pd = self.get_records_dataframe(training=False)

        train_pd.plot(y=metric, ax=ax, label=f"train-{metric}")
        val_pd.plot(y=metric, ax=ax, label=f"val-{metric}")
        plt.show()
Example no. 5
    def metric_auc(self, soft, target):
        print("AUC: {}".format(roc_auc_score(target, soft, average='micro')))
        fpr, tpr, thres = roc_curve(target, soft)
        plt.plot(fpr, tpr)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.0])
        plt.title('ROC Curve')
        plt.xlabel('False Positive Rate [1 - Specificity]')
        plt.ylabel('True Positive Rate [Sensitivity]')
        plt.grid(True)
        plt.show()
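The method expects 1-D binary targets and predicted scores; since the enclosing class is not shown, this hedged sketch exercises the same sklearn call directly with illustrative inputs:

import numpy as np
from sklearn.metrics import roc_auc_score

target = np.random.randint(0, 2, size=200)                          # illustrative binary labels
soft = np.clip(target + np.random.normal(0, 0.4, size=200), 0, 1)   # illustrative scores in [0, 1]
print("AUC: {}".format(roc_auc_score(target, soft, average='micro')))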
Example no. 6
def plot_images(images: list, labels: list,
                prediction=None,
                cols=4,
                target_shape=None):
    # we want to plot this in grid of shape (?, 4) by default
    if prediction is not None:
        assert len(images) == len(labels) == len(prediction)
    else:
        assert len(images) == len(labels)

    assert not len(images) % cols

    rows = len(images) // cols
    # Create figure
    fig, axes = plt.subplots(rows, cols)
    fig.subplots_adjust(hspace=.3, wspace=.5)

    for i, ax in enumerate(axes.flat):
        if target_shape is not None:
            ax.imshow(images[i].reshape(target_shape), cmap='binary')
        else:
            ax.imshow(images[i], cmap='binary')

        # Show true and predicted classes
        if prediction is None:
            xlabel = "True: {0}".format(labels[i])
        else:
            xlabel = "True: {0}, Pred: {1}".format(labels[i], prediction[i])

        # Show the classes as the label on x_axis
        ax.set_xlabel(xlabel)

        # Remove the ticks from the plot
        ax.set_xticks([])
        ax.set_yticks([])

    plt.show()
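A minimal usage sketch with random arrays standing in for real images; the shapes and labels are illustrative:

import numpy as np

images = [np.random.rand(784) for _ in range(8)]        # eight flattened 28x28 'images'
labels = list(np.random.randint(0, 10, size=8))
plot_images(images, labels, cols=4, target_shape=(28, 28))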
    if ("ms" in t):
        return float(t.replace("ms", ""))
    print t
    return t


path = "/localdisk/dchenTmp/scheduling/tunableOCLbenchmarks/result"

benches = ["2mm"]
kernels = {"2mm": ["mm2_kernel1", "mm2_kernel2"]}

for name in benches:
    for kernel in kernels[name]:
        times = []
        for file in os.listdir(path):
            if (name in file):
                f = open(path + "/" + file, "r")
                tmp = 0
                cnt = 0
                for line in f:
                    if (kernel in line):
                        cnt += 1
                        tmp += timeToFloat(line.split()[1])
                tmp = tmp / cnt
                times.append(tmp)
        plt.plot(times)

plt.show()

#print file.replace(name + ".exe_result", "").replace(".txt","").split("_")
Example no. 8
import pandas as pd
import statsmodels.api as sm
from matplotlib.pyplot import plot, show
from sklearn import linear_model as lm

df = pd.read_csv('/Users/bonythomas/patients.csv')
x = pd.read_csv('/Users/bonythomas/patients.csv',
                usecols=[
                    "Age", "Gender", "Smoker", "Weight", "Height",
                    "SelfAssessedHealthStatus", "Location"
                ])
y = pd.read_csv('/Users/bonythomas/patients.csv', usecols=["Systolic"])
obj_df = df.select_dtypes(include=['object']).copy()
converted_data = pd.factorize(obj_df['Gender'])[0]
converted_data1 = pd.factorize(obj_df['Location'])[0]
converted_data2 = pd.factorize(obj_df['SelfAssessedHealthStatus'])[0]
x['Gender'] = converted_data
x['Location'] = converted_data1
x['SelfAssessedHealthStatus'] = converted_data2
print('Predictors', x)
print('Predicted Value', y)
regr = lm.LinearRegression()
regr.fit(x, y)
# Note: statsmodels OLS fits without an intercept unless sm.add_constant(x) is used.
model1 = sm.OLS(y, x)
result = model1.fit()
predictions = result.predict(x)
residuals = y.values.ravel() - predictions
print('Intercept', regr.intercept_)
print('Coefficients', regr.coef_)
plot(predictions, residuals, 'o')
show()
print(result.summary())