def feal_atemp(cls, rms, scaleto=10):
    """Atemporal (individual frame) feature landscape.

    Builds a 20-element feature vector from a per-centroid RMSD list:
      features 0..4   -- one-hot count: ``scaleto`` at the nearest centroid
      features 5..9   -- logistic proximity to each centroid, normalized and
                         adjusted to smooth implicit water
      features 10..19 -- logistic relative distance (akin to LLE) for each
                         ordered centroid pair

    Args:
        rms: sequence of 5 RMSD distances (one per centroid) -- assumes
            exactly 5 entries; TODO confirm with callers.
        scaleto: maximum feature magnitude (default 10).

    Returns:
        np.ndarray of 20 feature values.
    """
    log_reld = op.makeLogisticFunc(scaleto, -3, 0)

    # Counts (features 0..4): one-hot on the closest centroid
    fealand = [0] * 5
    fealand[np.argmin(rms)] = scaleto

    # Proximity (features 5..9): logistic centered at the midpoint of the
    # two smallest distances to smooth over implicit water
    A, B = np.argsort(rms)[:2]
    prox = op.makeLogisticFunc(scaleto, scaleto, (rms[B] + rms[A]) / 2)
    tup = [prox(d) for d in rms]

    # Relative Distance (akin to LLE) (features 10..19): pairwise deltas
    for a in range(4):
        for b in range(a + 1, 5):
            tup.append(log_reld(rms[a] - rms[b]))

    fealand.extend(tup)
    # Additional feature spaces would go here
    return np.array(fealand)  # Tuple or NDArray?
def feal_atemp(rms, scaleto=10):
    """Atemporal (individual frame) feature landscape.

    NOTE(review): this redefinition shadows the earlier ``feal_atemp``
    (same name, different signature and proximity metric) if both live in
    the same scope -- confirm which variant is intended.

    Builds a 20-element feature vector from a per-centroid RMSD list:
      features 0..4   -- one-hot count: ``scaleto`` at the nearest centroid
      features 5..9   -- linear proximity ramp, clipped at 0 beyond ``maxd``
      features 10..19 -- logistic relative distance (akin to LLE) per pair

    Args:
        rms: sequence of 5 RMSD distances (one per centroid) -- assumes
            exactly 5 entries; TODO confirm with callers.
        scaleto: maximum feature magnitude (default 10).

    Returns:
        np.ndarray of 20 feature values.
    """
    log_reld = op.makeLogisticFunc(scaleto, -3, 0)

    # Counts (features 0..4): one-hot on the closest centroid
    fealand = [0] * 5
    fealand[np.argmin(rms)] = scaleto

    # Proximity (features 5..9): unscaled linear ramp, zero beyond maxd
    maxd = 10.  # 11.34
    tup = [max(maxd - dist, 0) for dist in rms]

    # Relative Distance (akin to LLE) (features 10..19)
    for a in range(4):
        for b in range(a + 1, 5):
            tup.append(log_reld(rms[a] - rms[b]))

    fealand.extend(tup)
    # Additional feature spaces would go here
    return np.array(fealand)  # Tuple or NDArray?
def atemporal2(cls, rms, scaleto=10):
    """Atemporal (individual frame) feature landscape, variant 2.

    Like ``feal_atemp`` but proximity is a linear ramp clamped between
    ``mind`` and ``maxd`` rather than a logistic.

    Builds a 20-element feature vector from a per-centroid RMSD list:
      features 0..4   -- one-hot count: ``scaleto`` at the nearest centroid
      features 5..9   -- clamped linear proximity in [0, scaleto]
      features 10..19 -- logistic relative distance per centroid pair

    Args:
        rms: sequence of 5 RMSD distances (one per centroid) -- assumes
            exactly 5 entries; TODO confirm with callers.
        scaleto: maximum feature magnitude (default 10).

    Returns:
        np.ndarray of 20 feature values.
    """
    log_reld = op.makeLogisticFunc(scaleto, -3, 0)
    maxd = 20  # distances >= maxd score 0 proximity
    mind = 10  # distances <= mind score full (scaleto) proximity

    # Counts (features 0..4): one-hot on the closest centroid
    fealand = [0] * 5
    fealand[np.argmin(rms)] = scaleto

    # Proximity (features 5..9): linear ramp clamped to [0, scaleto]
    for dist in rms:
        fealand.append(scaleto * max(maxd - max(dist, mind), 0) / (maxd - mind))

    # Relative Distance (akin to LLE) (features 10..19)
    tup = []
    for a in range(4):
        for b in range(a + 1, 5):
            tup.append(log_reld(rms[a] - rms[b]))

    fealand.extend(tup)
    # Additional feature spaces would go here
    return np.array(fealand)  # Tuple or NDArray?
def atemporal2(cls, rms, scaleto=10):
    """Atemporal (individual frame) feature landscape, variant 2.

    Produces a 20-element feature vector: a one-hot nearest-centroid count
    (0..4), a clamped linear proximity per centroid (5..9), and logistic
    pairwise relative distances (10..19).
    """
    log_reld = op.makeLogisticFunc(scaleto, -3, 0)
    maxd, mind = 20, 10

    # One-hot count on the nearest centroid
    fealand = [0, 0, 0, 0, 0]
    fealand[np.argmin(rms)] = scaleto

    # Clamped linear proximity for each centroid distance
    for dist in rms:
        clipped = max(dist, mind)
        fealand.append(scaleto * max(maxd - clipped, 0) / (maxd - mind))

    # Pairwise relative distances passed through the logistic
    pairwise = [log_reld(rms[a] - rms[b])
                for a in range(4)
                for b in range(a + 1, 5)]
    fealand.extend(pairwise)

    # Additional feature spaces would go here
    return np.array(fealand)  # Tuple or NDArray?
# --- Cluster scoring pass ---
# NOTE(review): 91116 appears to be a hard-coded total frame/point count
# used in three places below -- confirm and consider hoisting to a constant.
basin_score_well = np.zeros(91116)
basin_score_tran = np.zeros(91116)
multi_basins = []
max_sigma = np.max(sigma)
# Per-cluster "well" and "transition" scores, one entry per key in cluk.
cluster_score_well, cluster_score_tran = np.zeros(len(cluk)), np.zeros(
    len(cluk))
# Weight constants: C_* are cluster-level, B_* basin-level;
# suffixes: s=size, v=variance, d=distance (B_* are unused in this chunk).
C_ws, C_wv = .4, .6
C_ts, C_tv = .1, .90
B_wd, B_wv = .9, .1
B_td, B_tv = .25, .75
# Normalizers: largest nonzero entry of each variance array.
max_var1 = np.max(variance[np.nonzero(variance)])
max_var2 = np.max(variance2[np.nonzero(variance2)])
# Logistic size score; midpoint at the mean cluster size (91116/len(clulist)).
sizeFunc = op.makeLogisticFunc(2, 0.001, 91116 / len(clulist))
logging.info("CLUSTER_SUMMARY %d %d", support, num_clu)
for n, k in enumerate(cluk):
    cSize = len(clulist[n])
    cVar = variance2[n]
    # Variance normalized to [0, 1] against the largest nonzero variance.
    sc_var = 0 if cVar == 0 else cVar / max_var2
    # Shift logistic output from (0, 2) to (-1, 1).
    sc_size = sizeFunc(
        cSize) - 1  # max(-2, 1 - (len(cluk)*cSize / (91116)))
    # Well score rewards low variance; transition score rewards high variance.
    cluster_score_well[n] = max(0, C_wv * (1 - sc_var) + C_ws * sc_size)
    cluster_score_tran[n] = max(0, C_tv * sc_var + C_ts * sc_size)
    # cnt = np.bincount(list(it.chain(*[[c for c in dL[i]] for i in clulist[n]])))
    # Histogram of states over the cluster's members; presumably 6 states
    # (minlength=6) -- verify against getstate's range.
    cnt = np.bincount([getstate(i) for i in clulist[n]], minlength=6)
    logging.info(
        '%2d %15s | sz=%6d | var=%7.2f | scW=%5.2f scT%5.2f) %s' %
        (n, k, cSize, cVar, cluster_score_well[n], cluster_score_tran[n],
         cnt / sum(cnt)))
# --- Cluster scoring pass (second, longer variant) ---
# NOTE(review): this chunk duplicates the scoring pass above nearly
# verbatim and then continues into a per-element loop -- confirm which
# copy is live.
basin_score_well = np.zeros(91116)
basin_score_tran = np.zeros(91116)
multi_basins = []
max_sigma = np.max(sigma)
# Per-cluster "well" and "transition" scores, one entry per key in cluk.
cluster_score_well, cluster_score_tran = np.zeros(len(cluk)), np.zeros(len(cluk))
# Weight constants: C_* cluster-level, B_* basin-level; s=size, v=variance, d=distance.
C_ws, C_wv = .4, .6
C_ts, C_tv = .1, .90
B_wd, B_wv = .9, .1
B_td, B_tv = .25, .75
# Normalizers: largest nonzero entry of each variance array.
max_var1 = np.max(variance[np.nonzero(variance)])
max_var2 = np.max(variance2[np.nonzero(variance2)])
# Logistic size score; midpoint at the mean cluster size.
sizeFunc = op.makeLogisticFunc(2, 0.001, 91116/len(clulist))
logging.info("CLUSTER_SUMMARY %d %d", support, num_clu)
for n, k in enumerate(cluk):
    cSize = len(clulist[n])
    cVar = variance2[n]
    # Variance normalized to [0, 1] against the largest nonzero variance.
    sc_var = 0 if cVar == 0 else cVar / max_var2
    # Shift logistic output from (0, 2) to (-1, 1).
    sc_size = sizeFunc(cSize) -1  # max(-2, 1 - (len(cluk)*cSize / (91116)))
    # Well score rewards low variance; transition score rewards high variance.
    cluster_score_well[n] = max(0, C_wv *(1-sc_var) + C_ws * sc_size)
    cluster_score_tran[n] = max(0, C_tv * sc_var + C_ts * sc_size)
    # cnt = np.bincount(list(it.chain(*[[c for c in dL[i]] for i in clulist[n]])))
    # State histogram over the cluster's members (6 bins -- verify getstate range).
    cnt = np.bincount([getstate(i) for i in clulist[n]], minlength=6)
    logging.info('%2d %15s | sz=%6d | var=%7.2f | scW=%5.2f scT%5.2f) %s' %
                 (n, k, cSize, cVar, cluster_score_well[n], cluster_score_tran[n], cnt/sum(cnt)))
    # Per-cluster maxima over (index, score) element pairs.
    # NOTE(review): this reassigns the module-level max_sigma inside the loop.
    max_dist = np.max([s for _,s in elmlist[n]])
    max_sigma = np.max([sigma[i] for i,_ in elmlist[n]])
    # Loop body continues beyond this chunk.
    for i, s in elmlist[n]: