Example no. 1
import matplotlib.pyplot as plt

def make_plot(prediction, true_label):
    # draw the prediction and the ground truth side by side
    fig, axes = plt.subplots(1, 2)
    axes[0].imshow(prediction.reshape(76, 23, 3))
    axes[0].set_title("prediction")
    axes[1].imshow(true_label.reshape(76, 23, 3))
    axes[1].set_title("ground truth")
    return fig
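# A minimal usage sketch: the random arrays below are hypothetical
# placeholders for a real model prediction and its ground-truth label.
import numpy as np

prediction = np.random.rand(76 * 23 * 3)
true_label = np.random.rand(76 * 23 * 3)
fig = make_plot(prediction, true_label)
fig.savefig('prediction_vs_truth.png')  # hypothetical output path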
Example no. 2
# the snippet is truncated here; the plt.hist call head is reconstructed,
# assuming dist_test (used below) is what is being plotted
plt.hist(dist_test,
         density=True,  # 'normed' was removed in Matplotlib 3.x
         alpha=0.5,
         label='test')
plt.title('Normalised histogram of word count in questions', fontsize=15)
plt.legend()
plt.xlabel('Number of words', fontsize=15)
plt.ylabel('Probability', fontsize=15)
print(
    'mean-train {:.2f} std-train {:.2f} mean-test {:.2f} std-test {:.2f} max-train {:.2f} max-test {:.2f}'
    .format(dist_train.mean(), dist_train.std(), dist_test.mean(),
            dist_test.std(), dist_train.max(), dist_test.max()))

cloud = WordCloud(width=1440,
                  height=1080).generate(" ".join(train_qs.astype(str)))
plt.figure(figsize=(20, 15))
plt.imshow(cloud)
plt.axis('off')

# Semantic Analysis
qmarks = np.mean(train_qs.apply(lambda x: '?' in x))
math = np.mean(train_qs.apply(lambda x: '[math]' in x))
fullstop = np.mean(train_qs.apply(lambda x: '.' in x))
capital_first = np.mean(train_qs.apply(lambda x: x[0].isupper()))
capitals = np.mean(train_qs.apply(lambda x: any(y.isupper() for y in x)))
numbers = np.mean(train_qs.apply(lambda x: any(y.isdigit() for y in x)))

print('Questions with question marks: {:.2f}%'.format(qmarks * 100))
print('Questions with [math] tags: {:.2f}%'.format(math * 100))
print('Questions with full stops: {:.2f}%'.format(fullstop * 100))
print('Questions with capitalised first letters: {:.2f}%'.format(
    capital_first * 100))
print('Questions with capital letters: {:.2f}%'.format(capitals * 100))
print('Questions with numbers: {:.2f}%'.format(numbers * 100))
Example no. 3
def plot_image(image):
    # show a single grayscale image without axes
    plt.figure(figsize=(12, 4))
    plt.imshow(image, cmap='gray')
    plt.axis('off')
    plt.show()
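# Usage sketch for plot_image; the synthetic array below is a placeholder,
# since any 2-D array works with cmap='gray'.
import numpy as np

plot_image(np.random.rand(64, 192))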
Example no. 4
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
%matplotlib inline

# using pandas to read the database stored in the same folder
data = pd.read_csv('mnist.csv')

# viewing column heads
data.head()

# extracting one row from the dataset and viewing it up close
a = data.iloc[3,1:].values

# reshaping the extracted data into a reasonable size
a = a.reshape(28,28).astype('uint8')
plt.imshow(a)

# preparing the data
# separating labels and data values
df_x = data.iloc[:,1:]
df_y = data.iloc[:,0]

# creating test and train sizes/batches
x_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size = 0.2, random_state=4)

# check data
y_train.head()

# create the random forest classifier
rf = RandomForestClassifier(n_estimators=100)
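# The example stops after constructing the classifier. A minimal sketch of
# the natural next steps (fit, predict, score), assuming the train/test
# split created above:
rf.fit(x_train, y_train)
pred = rf.predict(x_test)
print('accuracy: {:.4f}'.format((pred == y_test.values).mean()))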
Example no. 5
pascal_root = voc_root

# initialize caffe in cpu mode (gpu mode is commented out below)
caffe.set_mode_cpu()
# caffe.set_mode_gpu()
# caffe.set_device(0)

workdir = './genderage_multilabel_with_datalayer'

net = caffe.Net(caffe_deploy, caffe_modelcaffe, caffe.TEST)

transform = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transform.set_transpose('data', (2, 0, 1))     # HWC -> CHW
transform.set_raw_scale('data', 255)           # [0, 1] floats -> [0, 255]
transform.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR

# resize the input blob to a fixed size
# (3 channels, to match the channel swap configured above)
net.blobs['data'].reshape(1, 3, 100, 100)

image = caffe.io.load_image('/opt/data/person/1.jpg')
transformed_image = transform.preprocess('data', image)
plt.imshow(image)

# load the image preprocessed by transform.preprocess into memory
net.blobs['data'].data[...] = transformed_image

output = net.forward()

# only a single image is tested here;
# output_pro holds the probabilities for the 1000 object classes
output_pro = output['prob'][0]
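# A hedged sketch of reading the top-5 classes out of output_pro; raw class
# indices are printed because no label file is shown in the example.
top5 = output_pro.argsort()[::-1][:5]
for idx in top5:
    print('class {} probability {:.4f}'.format(idx, output_pro[idx]))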
Example no. 6
# the dataset object is never defined in the snippet; assuming the
# scikit-learn digits dataset here (hypothetical reconstruction)
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt

dataset = load_digits()
X = dataset.data
y = dataset.target


dir(dataset)  # inspect the dataset's attributes


img = dataset.images
img.shape
plt.imshow(img[0], cmap=plt.get_cmap('nipy_spectral', 5))


plt.imshow(img[0], cmap=plt.cm.binary)


Example no. 7
import cv2
import matplotlib.pyplot as plt

# effective for writing programs such as screen capture
# find the rectangular outline
image = cv2.imread('digit_image.png')
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(image_gray, 230, 255, 0)
thresh = cv2.bitwise_not(thresh)  # invert white and black

plt.imshow(cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB))
plt.show()

# findContours returns (image, contours, hierarchy) in OpenCV 3.x, hence
# the [1]; OpenCV 4.x returns (contours, hierarchy), so use [0] there
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
image = cv2.drawContours(image, contours, 0, (0, 0, 255),
                         4)  # use -1 instead of 0 to draw all contours

plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()

contour = contours[0]
x, y, w, h = cv2.boundingRect(contour)
image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 3)

plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()

# find the outline with the Convex Hull algorithm
image = cv2.imread('digit_image.png')
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(image_gray, 230, 255, 0)
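# The snippet announces the convex-hull step but stops after thresholding.
# A minimal sketch of that step, assuming the same OpenCV 3.x return
# convention noted above:
thresh = cv2.bitwise_not(thresh)
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
hull = cv2.convexHull(contours[0])
image = cv2.drawContours(image, [hull], 0, (0, 255, 0), 3)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()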
Example no. 8
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
%matplotlib inline

#using pandas to read the database
data = pd.read_csv('mnist.csv')

#column viewing
data.head()

#extracting data
a = data.iloc[3,1:].values

#reshape the data into a reasonable size
a = a.reshape(28,28).astype('uint8')
plt.imshow(a)

#separating labels and data values
df_x = data.iloc[:,1:]
df_y = data.iloc[:,0]
x_train,x_test,y_train,y_test = train_test_split(df_x,df_y,test_size = 0.2,random_state = 4)
x_train.head()
y_train.head()

#classifier
rf = RandomForestClassifier(n_estimators=100)
rf.fit(x_train,y_train)
pred = rf.predict(x_test)
print(pred)
#prediction accuracy
a = y_test.values
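# The section breaks off here; a plausible completion of the accuracy check
# the comment announces, assuming pred and a as defined above:
count = (pred == a).sum()
print('accuracy: {:.4f}'.format(count / len(a)))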