Code example #1
File: convert_sacla_hdf5.py  Project: ivan-usov/sacla
    daq_info["Trans_PD2"] = {"fname": "PD.txt", "units": "V"}
    daq_info["TFY_PD9"] = {"fname": "PD9.txt", "units": "V"}
    daq_info["I0up_PD3"] = {"fname": "X41.txt", "units": "V"}
    daq_info["I0down_PD4"] = {"fname": "X42.txt", "units": "V"}
    daq_info["laserpos_h_M27"] = {"fname": "M27.txt", "units": "pulse"}
    daq_info["laserpos_v_M28"] = {"fname": "M28.txt", "units": "pulse"}

    # collect every group whose name starts with "run" from the input HDF5 file
    run_list = []
    for k in f.keys():
        if k[0:3] == "run":
            run_list.append(k)

    for dname, v in daq_info.items():
        df = pd.read_csv(add_files_dir + v["fname"], header=0, names=["tag", "value"], index_col="tag", )

        # for each run, slice the tag-indexed values to the run's tag range,
        # convert them and store the result under <run>/daq_info/<name>
        for r in run_list:
            tags = f[str(r) + "/event_info/tag_number_list"]
            red_df = df.loc[tags[0]:tags[-1]]
            conv_df = btc.convert(dname, red_df[:])
            dt = np.float64
            if v["units"] in ("bool", "pulse"):
                dt = np.int64
            tags_dset = fout.create_dataset(str(r) + "/daq_info/" + dname, data=conv_df, chunks=True, dtype=dt)
            tags_dset.attrs["units"] = np.bytes_(v["units"])
            # flush after each dataset so partial results are already on disk
            fout.flush()

    f.close()
    fout.close()
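
The merge in this example leans on pandas label-based slicing: each DAQ text file is loaded with the machine tag number as the index, and `df.loc[first_tag:last_tag]` picks out one run's rows, inclusive on both ends. Below is a minimal, self-contained sketch of just that step; the tag numbers and values are invented for illustration and are not SACLA data.

import io

import pandas as pd

# fake "PD.txt"-style content: one detector value per machine tag (made-up numbers)
csv_text = "tag,value\n100,0.12\n101,0.15\n102,0.11\n103,0.18\n"
df = pd.read_csv(io.StringIO(csv_text), header=0,
                 names=["tag", "value"], index_col="tag")

# label-based slicing on the tag index is inclusive on both ends, so the
# first and last tag number of a run select exactly that run's rows
run_first_tag, run_last_tag = 101, 103
red_df = df.loc[run_first_tag:run_last_tag]
print(red_df)          # rows for tags 101, 102 and 103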
Code example #2
File: plot_live_daq.py  Project: ivan-usov/sacla
    def __call__(self, i):
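        """Animation frame callback: refresh data, update plot artists, optionally dump JSON/CSV."""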
        xs = []
        ys = []
        dfs = []
        self.update_data()
        for p_i, plot in enumerate(self.plots):
            if self.data_list[p_i][plot['x']] is None:
                continue
            x = self.data_list[p_i][plot['x']][1]
            y = self.data_list[p_i][plot['y']][1]
            if plot['x'] == "Mono":
                x = beamtime_converter_201406XX.convert("energy", x)
            #print x, y
            while len(x) != len(y):
                if len(x) > len(y):
                    x.pop()
                else:
                    y.pop()
            dfs.append(pd.DataFrame(np.asarray([x, y]).T, columns=[plot['x'], plot['y']], ))
            dfs[p_i] = dfs[p_i].set_index(plot['x'])
            dfs[p_i] = dfs[p_i].sort_index()
            #if not self.average:
            xs.append(dfs[p_i].index.values)
            ys.append(dfs[p_i].values.flatten())
            #else:
            #    xs.append(dfs[p_i].index.values)
            #    ys.append(dfs[p_i].mean().values.flatten())

        if self.operation == "subtract":
            dftot = dfs[1].subtract(dfs[0], fill_value=0)
            dftot = dftot.sort_index()
            X = dftot.index
            Y = dftot.values
        else:
            X = dfs[0].index
            Y = dfs[0].values
            #print "Operation not supported, exiting"
            #sys.exit(-1)

        if self.json_name is not None:
            json_file = open(self.json_name, 'w')
            json_dict = {}
            json_dict["run"] = ""
            json_dict["name"] = "-".join(daq_quantities.keys())
            json_dict["plot_type"] = "scatter"
            json_dict["label_x"] = self.plots[0]['x']
            json_dict["label_y"] = self.plots[0]['y']
            json_dict["data"] = []
            json_dict["data"].append(X.tolist())
            json_dict["data"].append(Y.tolist())
            json.dump(json_dict, json_file)
            json_file.close()

        if self.csv_name is not None:
            import csv
            # text mode with newline='' is what csv.writer expects on Python 3;
            # the with block closes the file when it exits
            with open(self.csv_name, 'w', newline='') as csvfile:
                spamwriter = csv.writer(csvfile, delimiter=',',
                                        quotechar='|', quoting=csv.QUOTE_MINIMAL)

                spamwriter.writerow([self.plots[0]["x"], self.plots[0]["y"]])
                for i, x_i in enumerate(X):
                    spamwriter.writerow([x_i, Y[i]])

        if len(X) > 0 and len(Y) > 0:
            #self.ax.set_xlim(0.9 * float(min(X)), 1.1 * float(max(X)))
            self.ax.set_ylim(0.99 * float(min(Y)), 1.01 * float(max(Y)))

        if self.average:
            # average repeated x values; the standard deviation defines the error band
            dftot_avg = dftot.groupby(level=0).mean()
            dftot_std = dftot.groupby(level=0).std()
            avg_x = dftot_avg.index.tolist()
            avg_y = dftot_avg[plot['y']].values.tolist()
            std_y = dftot_std[plot['y']].values.tolist()

            error_plus = np.array(avg_y) + np.array(std_y)
            error_minus = np.array(avg_y) - np.array(std_y)
            self.line.set_data(avg_x, avg_y)
            # clear the previous error band; iterate over a copy so that
            # removing collections does not skip elements
            for coll in list(self.ax.collections):
                coll.remove()
            self.ax.fill_between(avg_x, error_minus, error_plus, alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')

        else:
            self.line.set_data(X.tolist(), Y.tolist())

        for i, l in enumerate(self.line2):
            if dfs[i].empty:
                continue
            if i > 0:
                xmin, xmax = self.ax2.get_xlim()
                ymin, ymax = self.ax2.get_ylim()
            else:
                xmin = dfs[i].idxmin()[self.plots[i]['y']]
                xmax = dfs[i].idxmax()[self.plots[i]['y']]
                ymin = dfs[i].min()[self.plots[i]['y']]
                ymax = dfs[i].max()[self.plots[i]['y']]

            if xmin > dfs[i].idxmin()[self.plots[i]['y']]:
                xmin = dfs[i].idxmin()[self.plots[i]['y']]
            if xmax < dfs[i].idxmax()[self.plots[i]['y']]:
                xmax = dfs[i].idxmax()[self.plots[i]['y']]
            #self.ax2.set_xlim(0.9 * float(xmin), 1.1 * float(xmax))
            if ymin > dfs[i].min()[self.plots[i]['y']]:
                ymin = dfs[i].min()[self.plots[i]['y']]
            if ymax < dfs[i].max()[self.plots[i]['y']]:
                ymax = dfs[i].max()[self.plots[i]['y']]
            self.ax2.set_ylim(0.99 * float(ymin), 1.01 * float(ymax))

            print(dfs[i].index.values, dfs[i].values.flatten())
            self.line2[i].set_data(dfs[i].index.values, dfs[i].values.flatten())
            #self.line2[i].set_data(xs[i], ys[i])

        return self.line, self.line2[0], self.line2[1]
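
The `__call__(self, i)` signature indicates that the object is meant to be driven as a per-frame callback, typically by matplotlib's `FuncAnimation`. The sketch below shows that pattern in a self-contained form; the `ToyUpdater` class and its sine-wave data are invented for illustration and are not the project's plotting class.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation


class ToyUpdater:
    """Callable object used as the FuncAnimation frame callback."""

    def __init__(self, ax):
        self.line, = ax.plot([], [], "o-")
        ax.set_xlim(0, 2 * np.pi)
        ax.set_ylim(-1.1, 1.1)

    def __call__(self, i):
        # regenerate the data for frame i, as update_data() would do for live DAQ values
        x = np.linspace(0, 2 * np.pi, 100)
        self.line.set_data(x, np.sin(x + 0.1 * i))
        return (self.line,)


if __name__ == "__main__":
    fig, ax = plt.subplots()
    updater = ToyUpdater(ax)
    # keep a reference to the animation object, otherwise it is garbage-collected
    anim = FuncAnimation(fig, updater, interval=200, blit=True)
    plt.show()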
Code example #3
    def __call__(self, i):
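        """Animation frame callback: refresh data, update plot artists, optionally dump JSON/CSV."""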
        xs = []
        ys = []
        dfs = []
        self.update_data()
        for p_i, plot in enumerate(self.plots):
            if self.data_list[p_i][plot['x']] is None:
                continue
            x = self.data_list[p_i][plot['x']][1]
            y = self.data_list[p_i][plot['y']][1]
            if plot['x'] == "Mono":
                x = beamtime_converter_201406XX.convert("energy", x)
            #print x, y
            while len(x) != len(y):
                if len(x) > len(y):
                    x.pop()
                else:
                    y.pop()
            dfs.append(
                pd.DataFrame(
                    np.asarray([x, y]).T,
                    columns=[plot['x'], plot['y']],
                ))
            dfs[p_i] = dfs[p_i].set_index(plot['x'])
            dfs[p_i] = dfs[p_i].sort_index()
            #if not self.average:
            xs.append(dfs[p_i].index.values)
            ys.append(dfs[p_i].values.flatten())
            #else:
            #    xs.append(dfs[p_i].index.values)
            #    ys.append(dfs[p_i].mean().values.flatten())

        if self.operation == "subtract":
            dftot = dfs[1].subtract(dfs[0], fill_value=0)
            dftot = dftot.sort_index()
            X = dftot.index
            Y = dftot.values
        else:
            X = dfs[0].index
            Y = dfs[0].values
            #print "Operation not supported, exiting"
            #sys.exit(-1)

        if self.json_name is not None:
            json_file = open(self.json_name, 'w')
            json_dict = {}
            json_dict["run"] = ""
            json_dict["name"] = "-".join(list(daq_quantities.keys()))
            json_dict["plot_type"] = "scatter"
            json_dict["label_x"] = self.plots[0]['x']
            json_dict["label_y"] = self.plots[0]['y']
            json_dict["data"] = []
            json_dict["data"].append(X.tolist())
            json_dict["data"].append(Y.tolist())
            json.dump(json_dict, json_file)
            json_file.close()

        if self.csv_name is not None:
            import csv
            # text mode with newline='' is what csv.writer expects on Python 3;
            # the with block closes the file when it exits
            with open(self.csv_name, 'w', newline='') as csvfile:
                spamwriter = csv.writer(csvfile,
                                        delimiter=',',
                                        quotechar='|',
                                        quoting=csv.QUOTE_MINIMAL)

                spamwriter.writerow([self.plots[0]["x"], self.plots[0]["y"]])
                for i, x_i in enumerate(X):
                    spamwriter.writerow([x_i, Y[i]])

        if len(X) > 0 and len(Y) > 0:
            #self.ax.set_xlim(0.9 * float(min(X)), 1.1 * float(max(X)))
            self.ax.set_ylim(0.99 * float(min(Y)), 1.01 * float(max(Y)))

        if self.average:
            # average repeated x values; the standard deviation defines the error band
            dftot_avg = dftot.groupby(level=0).mean()
            dftot_std = dftot.groupby(level=0).std()
            avg_x = dftot_avg.index.tolist()
            avg_y = dftot_avg[plot['y']].values.tolist()
            std_y = dftot_std[plot['y']].values.tolist()

            error_plus = np.array(avg_y) + np.array(std_y)
            error_minus = np.array(avg_y) - np.array(std_y)
            self.line.set_data(avg_x, avg_y)
            # clear the previous error band; iterate over a copy so that
            # removing collections does not skip elements
            for coll in list(self.ax.collections):
                coll.remove()
            self.ax.fill_between(avg_x,
                                 error_minus,
                                 error_plus,
                                 alpha=0.5,
                                 edgecolor='#CC4F1B',
                                 facecolor='#FF9848')

        else:
            self.line.set_data(X.tolist(), Y.tolist())

        for i, l in enumerate(self.line2):
            if dfs[i].empty:
                continue
            if i > 0:
                xmin, xmax = self.ax2.get_xlim()
                ymin, ymax = self.ax2.get_ylim()
            else:
                xmin = dfs[i].idxmin()[self.plots[i]['y']]
                xmax = dfs[i].idxmax()[self.plots[i]['y']]
                ymin = dfs[i].min()[self.plots[i]['y']]
                ymax = dfs[i].max()[self.plots[i]['y']]

            if xmin > dfs[i].idxmin()[self.plots[i]['y']]:
                xmin = dfs[i].idxmin()[self.plots[i]['y']]
            if xmax < dfs[i].idxmax()[self.plots[i]['y']]:
                xmax = dfs[i].idxmax()[self.plots[i]['y']]
            #self.ax2.set_xlim(0.9 * float(xmin), 1.1 * float(xmax))
            if ymin > dfs[i].min()[self.plots[i]['y']]:
                ymin = dfs[i].min()[self.plots[i]['y']]
            if ymax < dfs[i].max()[self.plots[i]['y']]:
                ymax = dfs[i].max()[self.plots[i]['y']]
            self.ax2.set_ylim(0.99 * float(ymin), 1.01 * float(ymax))

            print(dfs[i].index.values, dfs[i].values.flatten())
            self.line2[i].set_data(dfs[i].index.values,
                                   dfs[i].values.flatten())
            #self.line2[i].set_data(xs[i], ys[i])

        return self.line, self.line2[0], self.line2[1]
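
Both versions of `__call__` implement the "subtract" operation with `DataFrame.subtract(..., fill_value=0)`: the two scans are aligned on their index (the x quantity), and points present in only one scan are kept by substituting 0 for the missing side instead of producing NaN. A small illustrative sketch with invented numbers:

import pandas as pd

# two scans of the same quantity, indexed by the x value (numbers are made up)
df_on = pd.DataFrame({"signal": [1.0, 2.0, 3.0]}, index=[10, 11, 12])
df_off = pd.DataFrame({"signal": [0.5, 0.5]}, index=[11, 12])

# index-aligned difference; fill_value=0 keeps x=10, which only df_on contains
diff = df_on.subtract(df_off, fill_value=0).sort_index()
print(diff)   # 10 -> 1.0, 11 -> 1.5, 12 -> 2.5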