def get_response_content(fs):
    # read the table
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # get the numpy array of conformant points
    h_to_i = dict((h, i + 1) for i, h in enumerate(header_row))
    axis_headers = fs.axes
    if not axis_headers:
        raise ValueError('no Euclidean axes were provided')
    axis_set = set(axis_headers)
    header_set = set(header_row)
    bad_axes = axis_set - header_set
    if bad_axes:
        raise ValueError('invalid axes: ' + ', '.join(bad_axes))
    axis_lists = []
    for h in axis_headers:
        index = h_to_i[h]
        try:
            axis_list = Carbone.get_numeric_column(data_rows, index)
        except Carbone.NumericError:
            raise ValueError('expected the axis column %s '
                    'to be numeric' % h)
        axis_lists.append(axis_list)
    points = np.array(zip(*axis_lists))
    # find the set of indices of duplicate points
    dup_indices = get_dup_indices(points, fs.radius)
    # get the data rows with duplicate indices removed
    new_rows = [row for i, row in enumerate(data_rows) if i not in dup_indices]
    # construct the new table
    out = StringIO()
    print >> out, '\t'.join(header_row)
    print >> out, '\n'.join('\t'.join(row) for row in new_rows)
    return out.getvalue()
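
# The helper get_dup_indices above is not shown in these snippets. A minimal
# sketch of one plausible implementation, assuming it flags every point that
# lies within the given radius of an earlier, kept point (a hypothetical
# reading, not necessarily how the project's helper actually behaves):
import numpy as np

def get_dup_indices(points, radius):
    # keep the first point of each near-duplicate group and flag the rest
    dup_indices = set()
    for i, p in enumerate(points):
        for j in range(i):
            if j in dup_indices:
                continue
            if np.linalg.norm(p - points[j]) <= radius:
                dup_indices.add(i)
                break
    return dup_indices
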
def get_response_content(fs):
    # check the r table
    RUtil.RTable(fs.table.splitlines())
    # make the plot
    device = Form.g_imageformat_to_r_function[fs.imageformat]
    image_data = RUtil.run_plotter_concise(fs.table, g_script_body, device)
    return image_data
def get_response_content(fs):
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    points = get_rtable_info(rtable, fs.annotation, fs.axes)
    # do the clustering
    cluster_map = agglom.get_initial_cluster_map(points)
    w_ssd_map = agglom.get_initial_w_ssd_map(points)
    b_ssd_map = agglom.get_initial_b_ssd_map(points)
    q = agglom.get_initial_queue(b_ssd_map)
    while len(cluster_map) > fs.k:
        pair = agglom.get_pair_fast(cluster_map, q)
        agglom.merge_fast(cluster_map, w_ssd_map, b_ssd_map, q, pair)
    # create the map from a point index to a cluster index
    point_to_cluster = {}
    for cluster_index, point_indices in cluster_map.items():
        for point_index in point_indices:
            point_to_cluster[point_index] = cluster_index
    # define the raw labels which may be big numbers
    raw_labels = [point_to_cluster[i] for i, p in enumerate(points)]
    # rename the labels with small numbers
    raw_to_label = dict((b, a) for a, b in enumerate(sorted(set(raw_labels))))
    labels = [raw_to_label[raw] for raw in raw_labels]
    # get the response
    lines = ['\t'.join(header_row + [fs.annotation])]
    for i, (label, data_row) in enumerate(zip(labels, data_rows)):
        row = data_row + [str(label)]
        lines.append('\t'.join(row))
    # return the response
    return '\n'.join(lines) + '\n'
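
# get_rtable_info is shared by several of the clustering handlers here but is
# not itself shown. A rough reconstruction of what it plausibly does, judged
# from the inline versions elsewhere in these snippets: validate the requested
# axis headers and stack the numeric axis columns into a point array. The
# treatment of the annotation argument is an assumption (a guard against a
# column-name collision); this is a sketch, not the project's own code.
import numpy as np

def get_rtable_info(rtable, annotation, axis_headers):
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # presumably the new annotation column must not clash with an existing one
    if annotation in header_row:
        raise ValueError('the annotation name %s is already a column header'
                % annotation)
    if not axis_headers:
        raise ValueError('no Euclidean axes were provided')
    bad_axes = set(axis_headers) - set(header_row)
    if bad_axes:
        raise ValueError('invalid axes: ' + ', '.join(bad_axes))
    # the 1-based offset apparently skips a leading row label in each data row
    h_to_i = dict((h, i + 1) for i, h in enumerate(header_row))
    axis_lists = []
    for h in axis_headers:
        try:
            axis_lists.append(Carbone.get_numeric_column(data_rows, h_to_i[h]))
        except Carbone.NumericError:
            raise ValueError('expected the axis column %s '
                    'to be numeric' % h)
    return np.array(zip(*axis_lists))
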
def get_response_content(fs):
    # get the r table
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # check that the requested variable names are column headers
    if fs.var_a not in header_row:
        raise ValueError('the first variable name is not a column header')
    if fs.var_b not in header_row:
        raise ValueError('the second variable name is not a column header')
    return RUtil.run_with_table(fs.table, fs, get_script_content)
def main(args):
    """
    @param args: from argparse
    """
    # get some state that will not change between k-means restarts
    with open(args.table_filename) as fin:
        rtable = RUtil.RTable(fin)
    points = get_rtable_info(rtable, args.annotation, args.axes)
    init_strategy = kmeans.InitStrategy().string_to_function(args.kmeans_init)
    gs = GlobalState(rtable, points, args.annotation, args.k, init_strategy)
    # go until iteration is stopped for some reason
    print combobreaker.run_callable(
            ClusterState(gs), args.nseconds, args.nrestarts)
def get_response_content(fs):
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    points = get_rtable_info(rtable, fs.annotation, fs.axes)
    # do the clustering
    nrestarts = 10
    init_strategy = kmeans.InitStrategy().string_to_function(fs.kmeans_init)
    wcss, labels = kmeans.lloyd_with_restarts(
            points, fs.k, nrestarts, init_strategy)
    # get the response
    lines = ['\t'.join(header_row + [fs.annotation])]
    for i, (label, data_row) in enumerate(zip(labels, data_rows)):
        row = data_row + [str(label)]
        lines.append('\t'.join(row))
    # return the response
    return '\n'.join(lines) + '\n'
def get_response_content(fs):
    # get the independent variable names
    indep = Util.get_stripped_lines(fs.independent.splitlines())
    dep = fs.dependent
    # get the r table
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # check that the requested variable names are column headers
    bad_indep_names = set(indep) - set(header_row)
    if bad_indep_names:
        raise ValueError(
                'these requested independent variable names '
                'were not found as columns '
                'in the data table: ' + str(bad_indep_names))
    if dep not in header_row:
        raise ValueError('the dependent variable name '
                'was not found as a column in the data table')
    return RUtil.run_with_table(fs.table, (indep, dep), get_script_content)
def process(args, table_lines):
    """
    @param args: command line or web input
    @param table_lines: input lines
    @return: the image data as a string
    """
    # get the table string
    rtable = RUtil.RTable(table_lines)
    plot_info = PlotInfo(args, rtable.headers, rtable.data)
    augmented_lines = plot_info.get_augmented_table_lines()
    table_string = '\n'.join(augmented_lines)
    # get the script string
    script_string = plot_info.get_script(args)
    # get the device
    device = Form.g_imageformat_to_r_function[args.imageformat]
    # run R and get the image data
    image_data = RUtil.run_plotter_concise(table_string, script_string, device)
    # return the image data
    return image_data
def get_response_content(fs):
    # define constants
    nrestarts = 10
    # read the input
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    points = get_rtable_info(rtable, 'annotation', fs.axes)
    # do the clustering
    codebook, distortion = cluster.vq.kmeans(
            points, fs.k, iter=nrestarts, thresh=1e-9)
    sqdists = kmeans.get_point_center_sqdists(points, codebook)
    labels = kmeans.get_labels_without_cluster_removal(sqdists)
    wgss = kmeans.get_wcss(sqdists, labels)
    norms = [np.linalg.norm(p - codebook[g]) for p, g in zip(points, labels)]
    redistortion = np.mean(norms)
    # create the response
    out = StringIO()
    print >> out, 'scipy distortion:', distortion
    print >> out, 'recomputed distortion:', redistortion
    print >> out, 'wgss:', wgss
    # return the response
    return out.getvalue()
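
# For context: scipy's cluster.vq.kmeans reports the distortion as the mean
# (not summed) Euclidean distance from each point to its nearest centroid,
# which is what the recomputed value above is checked against. A small
# cross-check using only scipy/numpy primitives (the helper name is made up
# for illustration):
import numpy as np
from scipy import cluster

def recompute_distortion(points, codebook):
    # vq assigns each point to its nearest code and returns that distance
    codes, dists = cluster.vq.vq(points, codebook)
    return np.mean(dists)
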
def process(args, table_lines):
    """
    @param args: command line or web input
    @param table_lines: input lines
    @return: the image data as a string
    """
    rtable = RUtil.RTable(table_lines)
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # Read the relevant columns and their labels.
    plot_info = PlotInfo(args, header_row, data_rows)
    # Get info for the temporary data.
    augmented_lines = plot_info.get_augmented_table_lines()
    table_string = '\n'.join(augmented_lines)
    temp_table_name = Util.create_tmp_file(table_string, suffix='.table')
    temp_plot_name = Util.get_tmp_filename()
    script = plot_info.get_script(args, temp_plot_name, temp_table_name)
    temp_script_name = Util.create_tmp_file(script, suffix='.R')
    # Call R.
    retcode, r_out, r_err = RUtil.run(temp_script_name)
    if retcode:
        raise ValueError('R error:\n' + r_err)
    # Delete the temporary data table file.
    os.unlink(temp_table_name)
    # Delete the temporary script file.
    os.unlink(temp_script_name)
    # Read the image file.
    try:
        with open(temp_plot_name, 'rb') as fin:
            image_data = fin.read()
    except IOError:
        raise HandlingError('the R call does not seem to have created the plot')
    # Delete the temporary image file.
    os.unlink(temp_plot_name)
    # Return the image data as a string.
    return image_data
def get_response_content(fs):
    # read the table
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # get the numpy array of conformant points
    h_to_i = dict((h, i + 1) for i, h in enumerate(header_row))
    axis_headers = fs.axes
    if not axis_headers:
        raise ValueError('no Euclidean axes were provided')
    axis_set = set(axis_headers)
    header_set = set(header_row)
    bad_axes = axis_set - header_set
    if bad_axes:
        raise ValueError('invalid axes: ' + ', '.join(bad_axes))
    axis_lists = []
    for h in axis_headers:
        index = h_to_i[h]
        try:
            axis_list = Carbone.get_numeric_column(data_rows, index)
        except Carbone.NumericError:
            raise ValueError('expected the axis column %s '
                    'to be numeric' % h)
        axis_lists.append(axis_list)
    points = np.array(zip(*axis_lists))
    # precompute some stuff
    allmeandist = kmeans.get_allmeandist(points)
    nrestarts = 10
    nseconds = 2
    tm = time.time()
    n = len(points)
    wgss_list = []
    # neg because both items in the pair are used for sorting
    neg_calinski_k_pairs = []
    # look for the best calinski index in a small amount of time
    k = 2
    while True:
        codebook, distortion = cluster.vq.kmeans(
                points, k, iter=nrestarts, thresh=1e-9)
        sqdists = kmeans.get_point_center_sqdists(points, codebook)
        labels = kmeans.get_labels_without_cluster_removal(sqdists)
        wgss = kmeans.get_wcss(sqdists, labels)
        bgss = allmeandist - wgss
        calinski = kmeans.get_calinski_index(bgss, wgss, k, n)
        k_unique = len(set(labels))
        neg_calinski_k_pairs.append((-calinski, k_unique))
        wgss_list.append(wgss)
        if time.time() - tm > nseconds:
            break
        if k == n - 1:
            break
        k += 1
    max_k = k
    best_neg_calinski, best_k = min(neg_calinski_k_pairs)
    best_calinski = -best_neg_calinski
    # create the response
    out = StringIO()
    print >> out, 'best cluster count: k = %d' % best_k
    print >> out, 'searched 2 <= k <= %d clusters' % max_k
    print >> out, '%.2f seconds' % (time.time() - tm)
    if fs.verbose:
        print >> out
        print >> out, '(k_unique, wgss, calinski):'
        for wgss, neg_calinski_k_pair in zip(wgss_list, neg_calinski_k_pairs):
            neg_calinski, k_unique = neg_calinski_k_pair
            calinski = -neg_calinski
            row = [k_unique, wgss, calinski]
            print >> out, '\t'.join(str(x) for x in row)
    # return the response
    return out.getvalue()
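
# kmeans.get_calinski_index is used above without being shown. The formula can
# be read off the agglomerative variant below, which computes the same index
# inline; a minimal sketch consistent with that code (an assumed definition,
# not necessarily the project's own):
def calinski_index(bgss, wgss, k, n):
    # Calinski-Harabasz: (between-group SS / (k - 1)) / (within-group SS / (n - k))
    return (bgss / float(k - 1)) / (wgss / float(n - k))
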
def get_response_content(fs):
    # read the table
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # get the numpy array of conformant points
    h_to_i = dict((h, i + 1) for i, h in enumerate(header_row))
    axis_headers = fs.axes
    if not axis_headers:
        raise ValueError('no Euclidean axes were provided')
    axis_set = set(axis_headers)
    header_set = set(header_row)
    bad_axes = axis_set - header_set
    if bad_axes:
        raise ValueError('invalid axes: ' + ', '.join(bad_axes))
    axis_lists = []
    for h in axis_headers:
        index = h_to_i[h]
        try:
            axis_list = Carbone.get_numeric_column(data_rows, index)
        except Carbone.NumericError:
            raise ValueError('expected the axis column %s '
                    'to be numeric' % h)
        axis_lists.append(axis_list)
    points = np.array(zip(*axis_lists))
    # do the clustering while computing the wgss at each merge
    cluster_counts = []
    wgss_values = []
    allmeandist = kmeans.get_allmeandist(points)
    cluster_map = agglom.get_initial_cluster_map(points)
    w_ssd_map = agglom.get_initial_w_ssd_map(points)
    b_ssd_map = agglom.get_initial_b_ssd_map(points)
    q = agglom.get_initial_queue(b_ssd_map)
    while len(cluster_map) > 2:
        # do an agglomeration step
        pair = agglom.get_pair_fast(cluster_map, q)
        agglom.merge_fast(cluster_map, w_ssd_map, b_ssd_map, q, pair)
        # compute the within group sum of squares
        indices = cluster_map.keys()
        wgss = sum(w_ssd_map[i] / float(len(cluster_map[i])) for i in indices)
        # compute the between group sum of squares
        bgss = allmeandist - wgss
        # append to the lists
        cluster_counts.append(len(cluster_map))
        wgss_values.append(wgss)
    # compute the log wgss values
    wlogs = np.log(wgss_values)
    # reverse the log values so that they are by increasing cluster size
    wlogs = list(reversed(wlogs))
    # sample from the null distribution
    extents = np.max(points, axis=0) - np.min(points, axis=0)
    nclusters_list, expectations, thresholds = do_sampling(
            extents, len(points), fs.nsamples)
    # get the gaps
    gaps = np.array(expectations) - wlogs
    # Get the best cluster count according to the gap statistic.
    best_i = None
    criteria = []
    for i, ip1 in iterutils.pairwise(range(len(nclusters_list))):
        k, kp1 = nclusters_list[i], nclusters_list[ip1]
        criterion = gaps[i] - gaps[ip1] + thresholds[ip1]
        criteria.append(criterion)
        if criterion > 0:
            if best_i is None:
                best_i = i
    best_k = nclusters_list[best_i]
    # create the response
    out = StringIO()
    print >> out, 'best cluster count: k = %d' % best_k
    if fs.verbose:
        print >> out
        print >> out, '(k, expected, observed, gap, threshold, criterion):'
        n = len(nclusters_list)
        for i, k in enumerate(nclusters_list):
            row = [k, expectations[i], wlogs[i], gaps[i], thresholds[i]]
            if i < n - 1:
                row += [criteria[i]]
            else:
                row += ['-']
            print >> out, '\t'.join(str(x) for x in row)
    # return the response
    return out.getvalue()
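
# A tiny self-contained illustration of the selection rule used above: choose
# the smallest cluster count k whose criterion gap(k) - gap(k+1) + s(k+1) is
# positive, in the spirit of the gap statistic. The helper and the numbers in
# the usage line are made up for illustration only.
def pick_k_by_gap(nclusters_list, gaps, thresholds):
    for i in range(len(nclusters_list) - 1):
        if gaps[i] - gaps[i + 1] + thresholds[i + 1] > 0:
            return nclusters_list[i]
    # fall back to the largest k if no criterion is positive
    return nclusters_list[-1]

# pick_k_by_gap([2, 3, 4], [0.10, 0.30, 0.28], [0.02, 0.03, 0.03]) -> 3
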
def get_response_content(fs):
    # read the table
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # get the numpy array of conformant points
    h_to_i = dict((h, i + 1) for i, h in enumerate(header_row))
    axis_headers = fs.axes
    if not axis_headers:
        raise ValueError('no Euclidean axes were provided')
    axis_set = set(axis_headers)
    header_set = set(header_row)
    bad_axes = axis_set - header_set
    if bad_axes:
        raise ValueError('invalid axes: ' + ', '.join(bad_axes))
    axis_lists = []
    for h in axis_headers:
        index = h_to_i[h]
        try:
            axis_list = Carbone.get_numeric_column(data_rows, index)
        except Carbone.NumericError:
            raise ValueError('expected the axis column %s '
                    'to be numeric' % h)
        axis_lists.append(axis_list)
    points = np.array(zip(*axis_lists))
    # do the clustering while computing the calinski index at each merge
    cluster_counts = []
    wgss_values = []
    neg_calinskis = []
    allmeandist = kmeans.get_allmeandist(points)
    cluster_map = agglom.get_initial_cluster_map(points)
    w_ssd_map = agglom.get_initial_w_ssd_map(points)
    b_ssd_map = agglom.get_initial_b_ssd_map(points)
    q = agglom.get_initial_queue(b_ssd_map)
    while len(cluster_map) > 2:
        # do an agglomeration step
        pair = agglom.get_pair_fast(cluster_map, q)
        agglom.merge_fast(cluster_map, w_ssd_map, b_ssd_map, q, pair)
        # compute the within group sum of squares
        indices = cluster_map.keys()
        wgss = sum(w_ssd_map[i] / float(len(cluster_map[i])) for i in indices)
        # compute the between group sum of squares
        bgss = allmeandist - wgss
        # get the calinski index
        n = len(points)
        k = len(cluster_map)
        numerator = bgss / float(k - 1)
        denominator = wgss / float(n - k)
        calinski = numerator / denominator
        # append to the lists
        cluster_counts.append(k)
        wgss_values.append(wgss)
        neg_calinskis.append(-calinski)
    # Get the best cluster count according to the calinski index.
    # Do this trickery with negs so that it breaks ties
    # using the smallest number of clusters.
    neg_calinski, best_k = min(zip(neg_calinskis, cluster_counts))
    # create the response
    out = StringIO()
    print >> out, 'best cluster count: k = %d' % best_k
    if fs.verbose:
        print >> out
        print >> out, '(k, wgss, calinski):'
        for k, wgss, neg_calinski in zip(
                cluster_counts, wgss_values, neg_calinskis):
            row = (k, wgss, -neg_calinski)
            print >> out, '\t'.join(str(x) for x in row)
    # return the response
    return out.getvalue()