Example #1
def main():
    # LIBSVM's Python API (svm_problem/svm_parameter/svm_train/svm_predict,
    # typically imported via `from svmutil import *`).
    features = lsi(tfidf('small_train.txt'), 100)  # 100-dimensional LSI features
    classes = get_classes('small_train.txt', 0.7, 5)
    prob = svm_problem(classes, features)
    param = svm_parameter('-t 0 -c 4 -b 1')  # linear kernel, C=4, probability estimates
    m = svm_train(prob, param)
    # Evaluate on the held-out test set.
    test_features = lsi(tfidf('small_test.txt'), 100)
    test_classes = get_classes('small_test.txt', 0.7, 5)
    p_label, p_acc, p_val = svm_predict(test_classes, test_features, m)
Example #2
def get_acc(trainfilename, testfilename, d, a, r):
    # Train on `trainfilename` and report accuracy on `testfilename`.
    features = lsi(tfidf(trainfilename), d)
    classes = get_classes(trainfilename, a, r)
    prob = svm_problem(classes, features)
    param = svm_parameter('-t 0 -c 4 -b 1')
    m = svm_train(prob, param)
    test_features = lsi(tfidf(testfilename), d)
    test_classes = get_classes(testfilename, a, r)
    p_label, p_acc, p_val = svm_predict(test_classes, test_features, m)
    return p_acc
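A minimal usage sketch, assuming the helpers and data files from the snippets above; the parameter values here are illustrative only:

# Sweep the LSI dimensionality and compare held-out accuracy
# (svm_predict's accuracy tuple is (ACC, MSE, SCC)).
for d in (50, 100, 200):
    acc, mse, scc = get_acc('small_train.txt', 'small_test.txt', d, 0.7, 5)
    print(d, acc)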
Example #3
def get_attendance(username, subject):
    # `db` is an open pymongo database handle; `dumps` is bson.json_util.dumps.
    user = db.users.find_one({"username": username})
    attendance = user['attendance']
    sub = get_classes(username)["subjects"]
    req_sub = {}
    for i in sub:
        if i['name'] == subject:
            req_sub = i
    tot = len(req_sub["occurences"])
    present = 0
    absent = 0
    for k in req_sub["occurences"]:
        for i in attendance:
            if i["id"] == k["id"]:
                if i["presence"] == "p":
                    present += 1
                elif i["presence"] == "a":  # "a" marks an absence in the same field
                    absent += 1
    attendance_sub = {
        "subject": subject,
        "attendance": {
            "total": tot,
            "present": present,
            "absent": absent,
            "pending": tot - absent - present,
        },
    }
    print(dumps(attendance_sub))
    return dumps(attendance_sub)
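For context, here is a hypothetical shape of the documents this snippet appears to assume; the field names are inferred from the code above (including the 'occurences' spelling) and are not the project's documented schema:

# Assumed document shapes, for illustration only.
user_doc = {
    "username": "alice",
    "attendance": [{"id": 1, "presence": "p"},
                   {"id": 2, "presence": "a"}],
}
classes_doc = {
    "subjects": [{"name": "math",
                  "occurences": [{"id": 1}, {"id": 2}, {"id": 3}]}],
}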
Example #4
def methods_trace(ctxt, expr=None):
    'reports calls to several Dalvik methods'

    cpt = 0
    class_list = get_classes(ctxt, expr)
    for cla in class_list:
        method_list = get_methods(ctxt, cla)
        for method in method_list:
            # Split "pkg.Class.method" into a class path and a method name.
            cpath, mname, mjni = andbug.options.parse_mquery(
                ".".join(method.split('.')[:-1]), method.split('.')[-1])
            cmd_hook_methods(ctxt, cpath, mname)
            cpt += 1
    andbug.screed.item("%d functions hooked" % cpt)
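A hypothetical invocation, assuming an andbug session where ctxt is the attached process context; the package name is illustrative:

# Hook every method of every class matching the expression.
methods_trace(ctxt, 'com.example.target')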
Example #5
    def detect(self, img):
        img_h, img_w = img.shape[:2]
        inp, ox, oy, new_w, new_h = self.preprocess(img)
        inp = np.expand_dims(inp, 0)
        outs = self.sess.run(self.net.prediction, feed_dict={self.inputs: inp})
        outs = outs[0]
        boxes = decode_boxes(outs[:, :4], self.priors)
        preds = np.argmax(outs[:, 4:], axis=1)
        confidences = np.max(outs[:, 4:], axis=1)
        # Skip the background class (index 0).
        mask = np.where(preds > 0)
        boxes = boxes[mask]
        preds = preds[mask]
        confidences = confidences[mask]
        # Keep only detections above the anchor confidence threshold.
        mask = np.where(confidences >= cfgs.AnchorThreshold)
        boxes = boxes[mask]
        preds = preds[mask]
        confidences = confidences[mask]
        results = []
        for box, clsid, conf in zip(boxes, preds, confidences):
            # Map box coordinates from the padded/resized network input
            # back to the original image.
            xmin, ymin, xmax, ymax = box
            left = int((xmin - ox) / new_w * img_w)
            top = int((ymin - oy) / new_h * img_h)
            right = int((xmax - ox) / new_w * img_w)
            bottom = int((ymax - oy) / new_h * img_h)
            conf = float(conf)
            name, color = get_classes(clsid - 1)
            results.append({
                'left': left,
                'top': top,
                'right': right,
                'bottom': bottom,
                'name': name,
                'color': color,
                'confidence': conf,
            })
        # Two-pass NMS: standard overlap suppression, then a 'Min'-mode pass.
        results = nms(results, self.threshold)
        results = nms(results, 1 - self.threshold, 'Min')
        return results
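A hypothetical usage sketch; the Detector class name, constructor argument, and image path are assumptions, not from the source:

import cv2

detector = Detector('checkpoints/model.ckpt')  # hypothetical wrapper class
img = cv2.imread('street.jpg')
for det in detector.detect(img):
    # Draw each detection in its class color and report the label.
    cv2.rectangle(img, (det['left'], det['top']),
                  (det['right'], det['bottom']), det['color'], 2)
    print(det['name'], det['confidence'])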
Example #6
args = parser.parse_args()
parser.print_help()
print('input args: ', args)

if __name__ == '__main__':
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    #   sess = tf.compat.v1.Session()
    #   K.set_session(sess)
    tf.compat.v1.keras.backend.set_session(sess)
    data_dirs = args.data_dirs
    output_representation = args.output_representation
    sample_rate = args.sample_rate
    print("sample rate", sample_rate)
    batch_size = args.batch_size
    classes = get_classes(wanted_only=True)
    model_settings = prepare_model_settings(
        label_count=len(prepare_words_list(classes)),
        sample_rate=sample_rate,
        clip_duration_ms=800,
        window_size_ms=30.0,
        window_stride_ms=10.0,
        dct_coefficient_count=80,
        num_log_mel_features=60,
        output_representation=output_representation)

    print(model_settings)

    ap = AudioProcessor(data_dirs=data_dirs,
                        wanted_words=classes,
                        silence_percentage=13.0,
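prepare_model_settings itself is not shown in these snippets. As a rough sketch of what such a helper conventionally derives, modeled on TensorFlow's speech_commands example rather than this project's actual code:

def prepare_model_settings_sketch(label_count, sample_rate, clip_duration_ms,
                                  window_size_ms, window_stride_ms):
    # Convert durations in milliseconds into sample and frame counts.
    desired_samples = int(sample_rate * clip_duration_ms / 1000)
    window_size_samples = int(sample_rate * window_size_ms / 1000)
    window_stride_samples = int(sample_rate * window_stride_ms / 1000)
    length_minus_window = desired_samples - window_size_samples
    spectrogram_length = 0 if length_minus_window < 0 else (
        1 + length_minus_window // window_stride_samples)
    return {
        'desired_samples': desired_samples,
        'window_size_samples': window_size_samples,
        'window_stride_samples': window_stride_samples,
        'spectrogram_length': spectrogram_length,
        'label_count': label_count,
        'sample_rate': sample_rate,
    }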
Example #7
    default=100,
    help='number of epochs')
args = parser.parse_args()
parser.print_help()
print('input args: ', args)

if __name__ == '__main__':
  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
  sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
  K.set_session(sess)
  data_dirs = args.data_dirs
  output_representation = args.output_representation
  sample_rate = args.sample_rate
  batch_size = args.batch_size
  epoch = args.epoch
  classes = get_classes(wanted_only=True)
  model_settings = prepare_model_settings(
      label_count=len(prepare_words_list(classes)),
      sample_rate=sample_rate,
      clip_duration_ms=1000,
      window_size_ms=30.0,
      window_stride_ms=10.0,
      dct_coefficient_count=80,
      num_log_mel_features=60,
      output_representation=output_representation)

  print(model_settings)

  ap = AudioProcessor(
      data_dirs=data_dirs,
      wanted_words=classes,
Example #8
    required=True,
    help='<Required> The list of data directories. e.g., data/train')

args = parser.parse_args()
parser.print_help()
print('input args: ', args)

if __name__ == '__main__':
  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
  sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
  K.set_session(sess)
  data_dirs = args.data_dirs
  output_representation = args.output_representation
  sample_rate = args.sample_rate
  batch_size = args.batch_size
  classes = get_classes(wanted_only=True)
  model_settings = prepare_model_settings(
      label_count=len(prepare_words_list(classes)),
      sample_rate=sample_rate,
      clip_duration_ms=1000,
      window_size_ms=30.0,
      window_stride_ms=10.0,
      dct_coefficient_count=80,
      num_log_mel_features=60,
      output_representation=output_representation)

  print(model_settings)

  ap = AudioProcessor(
      data_dirs=data_dirs,
      wanted_words=classes,
Example #9
if __name__ == '__main__':
    test_fns = sorted(glob('data/test/audio/*.wav'))
    sess = K.get_session()
    K.set_learning_phase(0)
    sample_rate = 16000
    use_tta = True
    use_speed_tta = False
    if use_speed_tta:
        tta_fns = sorted(glob('data/tta_test/audio/*.wav'))
        assert len(test_fns) == len(tta_fns)
    wanted_only = False
    extend_reversed = False
    output_representation = 'raw'
    batch_size = 384
    wanted_words = prepare_words_list(get_classes(wanted_only=True))
    classes = get_classes(wanted_only=wanted_only,
                          extend_reversed=extend_reversed)
    int2label = get_int2label(wanted_only=wanted_only,
                              extend_reversed=extend_reversed)
    model_settings = prepare_model_settings(
        label_count=len(prepare_words_list(classes)),
        sample_rate=sample_rate,
        clip_duration_ms=1000,
        window_size_ms=25.0,
        window_stride_ms=15.0,
        dct_coefficient_count=80,
        num_log_mel_features=60,
        output_representation=output_representation)
    ap = AudioProcessor(data_dirs=['data/train/audio'],
                        wanted_words=classes,
Example #10
# np.log(32) ~ 3.5
# np.log(48) ~ 3.9
# 64727 training files
if __name__ == '__main__':
    # restrict gpu usage: https://stackoverflow.com/questions/34199233/how-to-prevent-tensorflow-from-allocating-the-totality-of-a-gpu-memory  # noqa
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    K.set_session(sess)
    data_dirs = ['data/train/audio']
    add_pseudo = True
    if add_pseudo:
        data_dirs.append('data/heng_pseudo')
    output_representation = 'raw'
    sample_rate = 16000
    batch_size = 384
    classes = get_classes(wanted_only=True, extend_reversed=False)
    model_settings = prepare_model_settings(
        label_count=len(prepare_words_list(classes)),
        sample_rate=sample_rate,
        clip_duration_ms=1000,
        window_size_ms=30.0,
        window_stride_ms=10.0,
        dct_coefficient_count=80,
        num_log_mel_features=60,
        output_representation=output_representation)
    ap = AudioProcessor(data_dirs=data_dirs,
                        wanted_words=classes,
                        silence_percentage=13.0,
                        unknown_percentage=60.0,
                        validation_percentage=10.0,
                        testing_percentage=0.0,
Example #11
from tensorflow.python.ops import io_ops
from keras import backend as K
K.set_session(sess)  # `sess` is a tf.Session created earlier in the script
K.set_learning_phase(0)
from keras.models import load_model
from keras.applications.mobilenet import DepthwiseConv2D
from keras.activations import softmax
from model import relu6, overlapping_time_slice_stack
from classes import get_classes
from utils import smooth_categorical_crossentropy

FINAL_TENSOR_NAME = 'labels_softmax'
FROZEN_PATH = 'tf_files/frozen.pb'
OPTIMIZED_PATH = 'tf_files/optimized.pb'

wanted_classes = get_classes(wanted_only=True)
all_classes = get_classes(wanted_only=False)

model = load_model('checkpoints_186/ep-053-vl-0.2915.hdf5',
                   custom_objects={'relu6': relu6,
                                   'DepthwiseConv2D': DepthwiseConv2D,
                                   'overlapping_time_slice_stack':
                                   overlapping_time_slice_stack,
                                   'softmax': softmax,
                                   '<lambda>':
                                   smooth_categorical_crossentropy})

# rename placeholders for special prize:
# https://www.kaggle.com/c/tensorflow-speech-recognition-challenge#Prizes
# decoded_sample_data:0, taking a [16000, 1] float tensor as input,
# representing the audio PCM-encoded data.
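A minimal, hypothetical sketch of that renaming step; the reshape and the model's assumed [1, 16000] input shape are guesses about the loaded Keras model's interface, not the repository's actual code:

import tensorflow as tf

# Expose the competition's expected input name, decoded_sample_data:0,
# a [16000, 1] float32 tensor of PCM audio.
sample_data = tf.placeholder(tf.float32, shape=[16000, 1],
                             name='decoded_sample_data')
# Run the loaded Keras model on the renamed input and expose its output
# under FINAL_TENSOR_NAME.
probs = model(tf.reshape(sample_data, [1, 16000]))
tf.identity(probs, name=FINAL_TENSOR_NAME)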