def features():
    """Return dataset metadata: index id, label column, and feature names."""
    index_id = em.data.index.name or 'index'
    feature_names = list(em.default_data.keys())
    return jsonify({
        'id': index_id,
        'label': em.label_name,
        'features': feature_names,
    })
def sample_table():
    """Explain a random sample of rows and return them as a JSON list.

    The sample size defaults to 10 and can be overridden with the ``N``
    query parameter. The size is clamped to the available row-id range
    (ids 1..len-1) so ``random.sample`` cannot raise ``ValueError`` on
    small datasets.
    """
    n = request.args.get('N', 10, int)
    population = range(1, len(em.data))
    sample_ids = random.sample(population, min(n, len(population)))
    # explainid_non_flask expects the row id as a string.
    return jsonify([explainid_non_flask(str(row_id)) for row_id in sample_ids])
def explain():
    """Explain a prediction for the query-string input, using defaults for any missing feature."""
    merged = dict(ChainMap(request.args, em.default_data))
    merged = em.cast_dct(merged)
    probability, explanation = em.explain_dct(merged)
    payload = {
        'data': dict(merged),
        'probability': probability,
        'explanation': explanation,
    }
    return jsonify(payload)
def predict(args):
    """Return the model's probability for *args*, merged over the defaults.

    The explanation produced by ``em.explain_dct`` is discarded; only the
    probability is returned to the caller.
    """
    merged = em.cast_dct(dict(ChainMap(args, em.default_data)))
    probability, _explanation = em.explain_dct(merged)
    return jsonify({'probability': probability})
def predict(args):
    """Run the activation model on a base64-encoded image and return its raw output.

    Args:
        args: mapping with an ``'image'`` entry holding a base64 data-URL.
            The first 22 characters (the ``data:image/...;base64,`` prefix)
            are stripped — NOTE(review): a fixed-length prefix assumes one
            specific MIME type; confirm callers always send that format.

    Returns:
        JSON response with the final activation layer's output for the image.
    """
    im = Image.open(BytesIO(base64.b64decode(args['image'][22:])))
    im_array = np.asarray(im)
    print(im_array.shape)  # debug trace of the incoming image shape
    im_float = im_array.astype('float32')
    img_tensor = np.expand_dims(im_float, axis=0)  # add batch dimension
    im_normalised = img_tensor / 256
    activations = activation_model.predict(im_normalised)
    result = activations[-1][0, :]
    # BUG FIX: jsonify cannot serialise a numpy array; convert to a plain list.
    return jsonify({'prediction': result.tolist()})
def predict(args):
    """Classify a base64-encoded digit image with the torch model.

    The image is blurred, downscaled to 28x28 greyscale, normalised to
    [0, 1), and fed through ``model``; the index of the most probable
    class is returned.

    Args:
        args: mapping with an ``'image'`` entry holding a base64 data-URL;
            the first 22 characters (the URL prefix) are stripped.

    Returns:
        JSON response ``{'prediction': <class index>}``.
    """
    im = Image.open(BytesIO(base64.b64decode(args['image'][22:])))
    im_blur = im.filter(ImageFilter.GaussianBlur(5))
    # FIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    im_small = im_blur.resize((28, 28), Image.LANCZOS)
    im_bitmap = im_small.convert("L")
    im_array = np.asarray(im_bitmap)
    im_normalised = im_array.astype('float32') / 256
    im_tensor = torch.tensor(im_normalised).reshape(1, 1, 28, 28)
    with torch.no_grad():
        logps = model(im_tensor)  # model presumably outputs log-probabilities
    ps = torch.exp(logps)
    probab = list(ps.cpu().numpy()[0])
    predict_val = probab.index(max(probab))
    return jsonify({'prediction': predict_val})
def explain(args):
    """Explain a wine-quality prediction for *args*, merged over the defaults."""
    merged = em.cast_dct(dict(ChainMap(args, em.default_data)))
    probability, class_pred, explanation = em.explain_dct_class(merged)
    payload = {
        'data': dict(merged),
        'quality_prediction': class_pred,
        'explanation': explanation,
    }
    return jsonify(payload)
def modelname():
    """Return the model's name as JSON."""
    payload = {'modelname': em.model_name}
    return jsonify(payload)
def default():
    """Return the default feature values as JSON."""
    defaults = em.default_data
    return jsonify(defaults)
def size():
    """Return the number of rows in the dataset as JSON."""
    row_count = len(em.data)
    return jsonify({'size': row_count})
def stats():
    """Return the precomputed dataset statistics as JSON."""
    summary = em.stats
    return jsonify(summary)
def categories():
    """Return each categorical feature's categories keyed by ordinal index."""
    mapping = {}
    for feature, cats in em.categories.items():
        mapping[feature] = dict(enumerate(cats))
    return jsonify(mapping)
def dataset():
    """Return the dataset identifier as JSON."""
    payload = {'dataset': em.dataset}
    return jsonify(payload)
def sample_table():
    """Explain up to 10 randomly sampled rows and return them as a JSON list.

    The sample size is clamped to the available row-id range (ids 1..len-1)
    so ``random.sample`` cannot raise ``ValueError`` on datasets with 10 or
    fewer rows.
    """
    population = range(1, len(em.data))
    sample_ids = random.sample(population, min(10, len(population)))
    # explainid expects the row id as a string.
    return jsonify([explainid(str(row_id)) for row_id in sample_ids])
def predict(args):
    """Return only the predicted wine-quality class for *args*.

    The probability and explanation computed by ``em.explain_dct_class``
    are discarded.
    """
    merged = em.cast_dct(dict(ChainMap(args, em.default_data)))
    _probability, class_pred, _explanation = em.explain_dct_class(merged)
    return jsonify({'quality_prediction': class_pred})