def train(model="categorizer", file_x="data-x.csv", file_y="data-y.csv"): x = tf.contrib.learn.datasets.base.load_csv_without_header( filename=file_x, target_dtype=np.float32, features_dtype=np.float32) old_x = concatenate((mat(x.data), mat(x.target).T), axis=1) x = old_x[:, 1:] y = tf.contrib.learn.datasets.base.load_csv_without_header( filename=file_y, target_dtype=np.int32, features_dtype=np.int32) y = mat(y.target).T if shape(x)[0] != shape(y)[0]: raise NameError('matrices do not match!') data = concatenate((x, y), axis=1) data = parse.shuffle(data) train, test = parse.split(data) trainX, trainY = parse.splitLabels(train) testX, testY = parse.splitLabels(test) featureCount = shape(trainX)[1] print 'feature count ' + str(featureCount) print 'training set ' + str(shape(trainX)[0]) print 'testing set ' + str(shape(testX)[0]) classifier = common.prepare_classifier("./" + model + "-model", featureCount) print 'Training start' classifier.fit(x=trainX, y=trainY, steps=2000) print 'Training done' accuracy_score = classifier.evaluate(x=testX, y=testY)["accuracy"] print('Accuracy: {0:f}'.format(accuracy_score))
import parse
import syntax
import translate


def src2blocks(source):
    # Split raw source into expressions and lines, parse into a block tree,
    # translate it, and check its syntax before returning it.
    exprs, lines = parse.split(source)
    root = parse.parse(exprs)
    translate.translate(root)
    syntax.check_syntax(root)
    return root, lines
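# Usage sketch (assumption, not from the original source): `src2blocks` takes
# raw program text and returns the translated, syntax-checked block tree plus
# the split source lines. The file name here is illustrative only.
with open("example.src") as f:
    root, lines = src2blocks(f.read())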
# Requires an MXNet/OpenCV environment; `crop_img` is a project-local helper
# assumed to be importable next to this class.
import cv2
import mxnet as mx
import numpy as np


def next(self):
    from parse import split

    # Pre-allocate batch buffers.
    ndleft = mx.nd.zeros((self.batch_size * self.data_frames, 3,
                          self.data_shape[1], self.data_shape[0]))
    if self.upsample > 1:
        left0 = np.zeros((self.batch_size,
                          self.data_shape[1] * self.upsample,
                          self.data_shape[0] * self.upsample, 3),
                         dtype=np.float32)
    else:
        ndleft0 = mx.nd.zeros((self.batch_size, 3,
                               self.data_shape[1], self.data_shape[0]))
    if self.flow_frames > 0:
        ndflow = mx.nd.zeros((self.batch_size * self.flow_frames, 2,
                              self.data_shape[1], self.data_shape[0]))
    right = np.zeros((self.batch_size,
                      self.data_shape[1] * self.upsample,
                      self.data_shape[0] * self.upsample, 3),
                     dtype=np.float32)
    if self.output_depth:
        depth = np.zeros((self.batch_size,
                          self.data_shape[1] * self.data_shape[0]),
                         dtype=np.float32)

    with self.env.begin() as txn:
        for i in range(self.batch_size):
            if self.cur >= len(self.idx):
                i -= 1
                break
            idx = self.idx[self.cur]
            if self.upsample > 1:
                # Seek to and decode the video frame that corresponds to this index.
                nidx = int(idx)
                mov = nidx // 1000000
                nframe = nidx % 1000000
                nframe = nframe // 10000 * 3 * 24 * 60 + nframe % 10000
                if self.caps[mov].get(cv2.CAP_PROP_POS_FRAMES) != nframe:
                    print('seek', nframe)
                    self.caps[mov].set(cv2.CAP_PROP_POS_FRAMES, nframe)
                ret, frame = self.caps[mov].read()
                assert ret
                margin = (frame.shape[0] - 800) // 2
                lframe, rframe = split(frame, reshape=self.base_shape, vert=True,
                                       clip=(0, margin, 960, margin + 800))
            p = self.fix_p
            if self.output_depth:
                sd = txn.get('%09d' % idx, db=self.ddb)
                assert sd is not None
                _, dimg = mx.recordio.unpack_img(sd, -1)
                dimg, p = crop_img(dimg, p, self.data_shape, self.margin,
                                   test=self.test_mode)
                depth[i] = dimg.flat
            # Right (target) image: either from the decoded video frame or the LMDB.
            if self.upsample > 1:
                rimg, p = crop_img(rframe, p,
                                   (self.data_shape[0] * self.upsample,
                                    self.data_shape[1] * self.upsample),
                                   0, test=self.test_mode, grid=self.upsample)
                right[i] = rimg
            else:
                sr = txn.get('%09d' % idx, db=self.rdb)
                assert sr is not None
                _, rimg = mx.recordio.unpack_img(sr, 1)
                rimg, p = crop_img(rimg, p, self.data_shape, 0, test=self.test_mode)
                right[i] = rimg
            # Left (input) frames centered around the current index.
            for j in range(max(1, self.data_frames)):
                sl = txn.get('%09d' % (idx + (j - self.data_frames // 2) * self.stride),
                             db=self.ldb)
                if sl is not None:
                    _, s = mx.recordio.unpack(sl)
                    mx.nd.imdecode(s,
                                   clip_rect=(p[0], p[1],
                                              p[0] + self.data_shape[0],
                                              p[1] + self.data_shape[1]),
                                   out=ndleft, index=i * self.data_frames + j,
                                   channels=3, mean=self.left_mean_nd)
            if self.upsample > 1:
                limg, p = crop_img(lframe, p,
                                   (self.data_shape[0] * self.upsample,
                                    self.data_shape[1] * self.upsample),
                                   0, test=self.test_mode, grid=self.upsample)
                left0[i] = limg
            else:
                start = i * max(1, self.data_frames) + max(1, self.data_frames) // 2
                ndleft0[i:(i + 1)] = ndleft[start:(start + 1)] + self.left_mean_nd_1
            # Optical-flow frames, if requested.
            for j in range(self.flow_frames):
                sf = txn.get('%09d' % (idx + (j - self.flow_frames // 2) * self.stride),
                             db=self.fdb)
                if sf is not None:
                    _, s = mx.recordio.unpack(sf)
                    mx.nd.imdecode(s,
                                   clip_rect=(p[0], p[1],
                                              p[0] + self.data_shape[0],
                                              p[1] + self.data_shape[1]),
                                   out=ndflow, index=i * self.flow_frames + j,
                                   channels=2, mean=self.flow_mean_nd)
            self.cur += 1

    # Assemble the batch: reshape the per-frame buffers and stack the inputs.
    data = []
    if self.data_frames > 0:
        ndleft = ndleft.reshape((self.batch_size, self.data_frames * 3,
                                 self.data_shape[1], self.data_shape[0]))
        data.append(ndleft)
    if self.flow_frames > 0:
        ndflow = ndflow.reshape((self.batch_size, self.flow_frames * 2,
                                 self.data_shape[1], self.data_shape[0]))
        data.append(ndflow)
    if self.upsample > 1:
        data.append(mx.nd.array(left0.transpose((0, 3, 1, 2))))
    elif not self.no_left0:
        data.append(ndleft0)
    right = right.transpose((0, 3, 1, 2))
    if self.right_whiten:
        right -= self.right_mean
    i += 1
    pad = self.batch_size - i
    if pad:
        raise StopIteration
    if self.output_depth:
        return mx.io.DataBatch(data, [mx.nd.array(right), mx.nd.array(depth)],
                               pad, None)
    else:
        return mx.io.DataBatch(data, [mx.nd.array(right)], pad, None)
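# Usage sketch (assumption, not from the original source): `next` is the batch
# method of an MXNet DataIter-style class backed by an LMDB environment; a
# training loop typically pulls batches until StopIteration. `VideoIter` and
# its constructor arguments below are hypothetical stand-ins for however the
# iterator is actually constructed in the project.
it = VideoIter(batch_size=8, data_shape=(640, 480), data_frames=1, flow_frames=0)
try:
    while True:
        batch = it.next()  # mx.io.DataBatch with input data and label arrays
        # feed batch.data / batch.label into the model's forward/backward pass
except StopIteration:
    pass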