Example 1
import os
import sys

import pandas
# logger is assumed to come from Mantid, as in the other autoreduce scripts
from mantid.kernel import logger


def plot_combined(matched_runs, scaling_factors, ipts, publish=True):
    data_names = []
    data_list = []
    for i, run in enumerate(matched_runs):
        for xs in ['Off_Off', 'On_Off', 'Off_On', 'On_On']:
            file_path = "/SNS/REF_M/IPTS-%s/shared/autoreduce/REF_M_%s_%s_autoreduce.dat" % (ipts, run, xs)
            if os.path.isfile(file_path):
                ref_data = pandas.read_csv(file_path,
                                           delim_whitespace=True, comment='#', names=['q','r','dr','dq', 'a'])
                data_list.append([ref_data['q'], scaling_factors[i]*ref_data['r'], scaling_factors[i]*ref_data['dr']])
                data_names.append("r%s [%s]" % (run, xs))

    try:
        # Depending on where we run, we might get our publisher from
        # different places, or not at all.
        try: # version on autoreduce
            from postprocessing.publish_plot import plot1d
        except ImportError: # version on instrument computers
            from .web_report import plot1d
        if data_names:
            return plot1d(matched_runs[-1], data_list, data_names=data_names, instrument='REF_M',
                          x_title=u"Q (1/\u212b)", x_log=True,
                          y_title="Reflectivity", y_log=True, show_dx=False, publish=publish)
        else:
            logger.notice("Nothing to plot")
    except Exception:  # sys.exc_value is Python 2 only; exc_info works in both
        logger.error(str(sys.exc_info()[1]))
        logger.error("No publisher module found")
    return None
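
A minimal usage sketch for the function above, assuming a Mantid/autoreduce
environment. The run numbers, IPTS, and scaling factors are hypothetical
placeholders; the corresponding .dat files must exist under the IPTS shared
autoreduce directory for anything to be plotted.

# Hypothetical invocation -- the runs and IPTS number are made up.
matched_runs = ["29160", "29161"]
scaling_factors = [1.0, 0.85]  # scale the second run onto the first
plot_combined(matched_runs, scaling_factors, ipts="18442", publish=False)
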
Example 2
                         OrderDirectBeamsByRunNumber=True,
                         TemplateFile=template_file, FindPeaks=False)
first_run_of_set = int(output[1])


#-------------------------------------------------------------------------
# Produce plot for the web monitor
default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set
default_file_path = os.path.join(output_dir, default_file_name)
# Check and load the same path: the original tested the bare file name
# while loading from output_dir
if os.path.isfile(default_file_path):
    print("Loading %s" % default_file_path)
    reflectivity = LoadAscii(Filename=default_file_path, Unit="MomentumTransfer")

    from postprocessing.publish_plot import plot1d
    x = reflectivity.readX(0)
    y = reflectivity.readY(0)
    dy = reflectivity.readE(0)
    dx = reflectivity.readDx(0)
    
    if int(run_number) - first_run_of_set < 10:
        for r in range(0, 10):
            reduced_file = 'REFL_%s_%s_%s_auto.nxs' % (first_run_of_set, r+1, first_run_of_set+r)
            # Check the output directory, where the reduced files are written
            if os.path.isfile(os.path.join(output_dir, reduced_file)):
                plot1d(first_run_of_set+r, [[x, y, dy, dx]], instrument='REF_L',
                       x_title=u"Q (1/\u212b)", x_log=True,
                       y_title="Reflectivity", y_log=True, show_dx=False)
    else:
        plot1d(run_number, [[x, y, dy, dx]], instrument='REF_L', 
               x_title=u"Q (1/\u212b)", x_log=True,
               y_title="Reflectivity", y_log=True, show_dx=False)


Example 3
def run(filename, outdir, setEi=None):
    DGSdict=preprocessVanadium(RawVanadium, processed_van_file, MaskBTPParameters)
    #--------------------------------------
    #Preprocess data to get Ei and T0
    #--------------------------------------
    [EGuess,Ei,T0]=preprocessData(filename, setEi=setEi)
    """

    if os.path.isfile(outdir+'experiment_log.csv'):
        fm='fastappend'
    else:
        fm='new'
        
    snames='RunNumber,Title,Comment,StartTime,EndTime,Duration,ProtonCharge,'+\
    'vChTrans,Speed1,Speed1,Speed1,Phase1,Phase1,Phase1,Speed2,Speed2,Speed2,'+\
    'Phase2,Phase2,Phase2,Speed3,Speed3,Speed3,Phase3,Phase3,Phase3,EnergyRequest,s1t,s1r,s1l,s1b,'+\
    'vAttenuator2,vAttenuator1,svpressure,svpressure,svpressure,dvpressure,dvpressure,dvpressure,'+\
    'phi,phi,phi,Lakeshore1SensorA,Lakeshore1SensorA,Lakeshore1SensorA,'+\
    'Lakeshore1SensorB,Lakeshore1SensorB,Lakeshore1SensorB,'+\
    'Lakeshore1SensorC,Lakeshore1SensorC,Lakeshore1SensorC,'+\
    'Lakeshore1SensorD,Lakeshore1SensorD,Lakeshore1SensorD,'+\
    'Lakeshore2SensorA,Lakeshore2SensorA,Lakeshore2SensorA,'+\
    'Lakeshore2SensorB,Lakeshore2SensorB,Lakeshore2SensorB,'+\
    'Lakeshore2SensorC,Lakeshore2SensorC,Lakeshore2SensorC,'+\
    'Lakeshore2SensorD,Lakeshore2SensorD,Lakeshore2SensorD,'+\
    'SampleTemperatureOrangeCryo,SampleTemperatureOrangeCryo,SampleTemperatureOrangeCryo,CalculatedEi,CalculatedT0'
    
    stitles='RunNumber,Title,Comment,StartTime,EndTime,Duration,ProtonCharge,'+\
    'vChTrans,Speed1min,Speed1max,Speed1avg,Phase1min,Phase1max,Phase1avg,Speed2min,Speed2max,Speed2avg,'+\
    'Phase2min,Phase2max,Phase2avg,Speed3min,Speed3max,Speed3avg,Phase3min,Phase3max,Phase3avg,'+\
    'EnergyRequest,s1t,s1r,s1l,s1b,'+\
    'vAttenuator2,vAttenuator1,svpressuremin,svpressuremax,svpressureavg,dvpressuremin,dvpressuremax,dvpressureavg,'+\
    'phimin,phimax,phiavg,Lakeshore1SensorAmin,Lakeshore1SensorAmax,Lakeshore1SensorAavg,'+\
    'Lakeshore1SensorBmin,Lakeshore1SensorBmax,Lakeshore1SensorBavg,'+\
    'Lakeshore1SensorCmin,Lakeshore1SensorCmax,Lakeshore1SensorCavg,'+\
    'Lakeshore1SensorDmin,Lakeshore1SensorDmax,Lakeshore1SensorDavg,'+\
    'Lakeshore2SensorAmin,Lakeshore2SensorAmax,Lakeshore2SensorAavg,'+\
    'Lakeshore2SensorBmin,Lakeshore2SensorBmax,Lakeshore2SensorBavg,'+\
    'Lakeshore2SensorCmin,Lakeshore2SensorCmax,Lakeshore2SensorCavg,'+\
    'Lakeshore2SensorDmin,Lakeshore2SensorDmax,Lakeshore2SensorDavg,'+\
    'SampleTemperatureOrangeCryomin,SampleTemperatureOrangeCryomax,SampleTemperatureOrangeCryoavg,CalculatedEi,CalculatedT0'
    
    
    soperations = ['0']*len(snames.split(','))
    
    for i,name in enumerate(stitles.split(',')):
        name=name.strip()
        if name in ['RunNumber','Title','Comment','StartTime','EndTime']:
            soperations[i] = 'None'
        if name.find('min') == len(name)-3:
            soperations[i] = 'min'
        if name.find('max') == len(name)-3:
            soperations[i] = 'max'
        if name.find('avg') == len(name)-3:
            soperations[i] = 'average'
                           
    
    ExportExperimentLog(InputWorkspace = '__IWS',
                        OutputFilename = outdir+'experiment_log.csv',
                        FileMode = fm,
                        SampleLogNames = snames,
                        SampleLogTitles = stitles,
                        SampleLogOperation = ','.join(soperations),
                        FileFormat = "comma (csv)",
                        TimeZone = "America/New_York")
    """
    elog=ExperimentLog()
    elog.setLogList('vChTrans,Speed1,Phase1,Speed2,Phase2,Speed3,Phase3,EnergyRequest,s1t,s1r,s1l,s1b,s2t, s2r, s2l, s2b,  vAttenuator2,vAttenuator1,svpressure,dvpressure,Lakeshore1SensorA, Lakeshore1SensorB, Lakeshore2SensorB')
    elog.setSimpleLogList("vChTrans, EnergyRequest, s1t, s1r, s1l, s1b, s2t, s2r, s2l, s2b, vAttenuator2, vAttenuator1, Lakeshore1SensorA, Lakeshore1SensorB, Lakeshore2SensorB")
    elog.setSERotOptions('CCR13VRot, SEOCRot, CCR16Rot, CCR22Rot,phi')
    elog.setSETempOptions('SampleTemp, sampletemp, SensorA, SensorA340 ')
    elog.setFilename(outdir+'experiment_log.csv')
    angle=elog.save_line('__MonWS',CalculatedEi=Ei,CalculatedT0=T0)
    
    outpre='SEQ'
    runnum=str(mtd['__IWS'].getRunNumber()) 
    outfile=outpre+'_'+runnum+'_autoreduced'
    if not numpy.isnan(Ei):
        DGSdict['SampleInputWorkspace']='__IWS'
        DGSdict['SampleInputMonitorWorkspace']='__MonWS'
        DGSdict['IncidentEnergyGuess']=Ei
        DGSdict['UseIncidentEnergyGuess']='1'
        DGSdict['TimeZeroGuess']=T0
        DGSdict['EnergyTransferRange']=[Emin*EGuess,Estep*EGuess,Emax*EGuess]  #Typical values are -0.5*EGuess, 0.005*EGuess, 0.95*EGuess
        DGSdict['SofPhiEIsDistribution']='0' # keep events
        DGSdict['HardMaskFile']=HardMaskFile
        DGSdict['GroupingFile']=grouping   #'/SNS/SEQ/shared/autoreduce/SEQ_2x2_grouping.xml' #Typically an empty string '', choose 2x1 or some other grouping file created by GenerateGroupingSNSInelastic or GenerateGroupingPowder
        DGSdict['IncidentBeamNormalisation']='None'  #NEXUS file does not have any normalization, but the nxspe IS normalized later in code by charge
        DGSdict['UseBoundsForDetVan']='1'
        DGSdict['DetVanIntRangeHigh']=IntegrationRange[1]
        DGSdict['DetVanIntRangeLow']=IntegrationRange[0]
        DGSdict['DetVanIntRangeUnits']='Wavelength'
        DGSdict['OutputWorkspace']='__OWS'
        DgsReduction(**DGSdict)
        

        if create_elastic_nxspe:
            DGSdict['OutputWorkspace']='reduce_elastic'
            EGuess=DGSdict['IncidentEnergyGuess']
            DGSdict['EnergyTransferRange']=[-0.02*EGuess,0.005*EGuess,0.02*EGuess]
            DgsReduction(**DGSdict)
            nxspe_filename=os.path.join(outdir, "elastic","SEQ_" + runnum + "_elastic.nxspe")
            SaveNXSPE(Filename=nxspe_filename, InputWorkspace="reduce_elastic", Psi=angle, KiOverKfScaling='1')
            os.chmod(nxspe_filename, 0o664)

        #Do normalization of vanadium to 1
        # This step only runs ONCE if the processed vanadium file is not already present.
        if 'SaveProcessedDetVan' in DGSdict and NormalizedVanadiumEqualToOne:  # has_key is Python 2 only
            filename=DGSdict['SaveProcDetVanFilename']
            LoadNexus(Filename=filename,OutputWorkspace="__VAN")
            datay = mtd['__VAN'].extractY()
            meanval = float(datay[datay>0].mean())
            CreateSingleValuedWorkspace(OutputWorkspace='__meanval',DataValue=meanval)
            #Divide the vanadium by the mean
            Divide(LHSWorkspace='__VAN',RHSWorkspace='__meanval',OutputWorkspace='__VAN')
            #Multiply by the mean of vanadium: Normalized data = Data / (Van/meanvan) = Data * meanvan/Van.
            # This is because in DgsReduction the output data was already normalized by the vanadium data;
            # the normalization of vanadium to 1 happens afterwards, right here in this
            # section, so we need to compensate for that.
            Multiply(LHSWorkspace='__OWS',RHSWorkspace='__meanval',OutputWorkspace='__OWS')
            SaveNexus(InputWorkspace="__VAN", Filename= filename)
        
        SaveNexus(InputWorkspace="__OWS", Filename= outdir+outfile+".nxs")
        RebinToWorkspace(WorkspaceToRebin="__OWS",WorkspaceToMatch="__OWS",OutputWorkspace="__OWS",PreserveEvents='0')
        NormaliseByCurrent(InputWorkspace="__OWS",OutputWorkspace="__OWS")
        #Divide by bin width
        ConvertToDistribution(Workspace="__OWS") 
        #---------------------------------------
        # Generate summed spectra plot
        #---------------------------------------
        s=SumSpectra("__OWS")
        x=s.readX(0)
        y=s.readY(0)
        # postprocessing is only available on some hosts (e.g. the autoreduction nodes)
        try:
            from postprocessing.publish_plot import plot1d
            # data_list is a list of [x, y] traces, as in the other examples;
            # x has one more point than y (bin edges), so drop one to align lengths
            plot1d(runnum, [[x[1:], y]], instrument='SEQ',
                x_title="Energy transfer (meV)",
                y_title="Intensity", y_log=True)
        except Exception:
            logger.error("Failed to publish plot")
        
        if NXSPE_flag:
            # Use the mean of the 'phi' sample log as the rotation angle for the NXSPE metadata
            angle=mtd["__OWS"].run()['phi'].getStatistics().mean
            SaveNXSPE(InputWorkspace="__OWS", Filename= outdir+outfile+".nxspe",Efixed=Ei,Psi=angle,KiOverKfScaling=True)
            GenerateGroupingPowder(InputWorkspace="__OWS",AngleStep=0.5, GroupingFilename=outdir+'powdergroupfile.xml')
            GroupDetectors(InputWorkspace="__OWS", OutputWorkspace="powdergroupdata", MapFile=outdir+'powdergroupfile.xml',Behaviour='Average')
            SaveNXSPE(InputWorkspace="powdergroupdata", Filename= outdir+"/powder/"+outfile+"_powder.nxspe",
                      Efixed=Ei,Psi=angle,KiOverKfScaling=True,ParFile=outdir+'powdergroupfile.par') 
        if clean:
            WS_clean()
    else:
        ConvertUnits(InputWorkspace="__IWS",OutputWorkspace="__IWS",Target='dSpacing')
        Rebin(InputWorkspace="__IWS",OutputWorkspace="__OWS",Params='0.5,0.005,10',PreserveEvents='0')
        SaveNexus(InputWorkspace="__OWS", Filename= outdir+outfile+".nxs")
                                                    
    return
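
A minimal sketch of how this entry point might be driven from an autoreduction
wrapper. The paths are hypothetical, and the module-level names the function
relies on (RawVanadium, HardMaskFile, Emin/Estep/Emax, grouping, NXSPE_flag,
clean, ...) must be defined earlier in the surrounding reduction script.

# Hypothetical driver -- replace the arguments with a real SEQ event file
# and an existing output directory (run() concatenates paths, so keep the
# trailing slash on outdir).
import sys

if __name__ == "__main__":
    nexus_file = sys.argv[1]   # an event NeXus file for one SEQ run
    output_dir = sys.argv[2]   # e.g. '/tmp/seq_reduced/'
    run(nexus_file, output_dir, setEi=None)
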
Example 4
                    CropWorkspace(InputWorkspace="USANS_detector", OutputWorkspace="peak_detector", XMin=peak[0], XMax=peak[1])
                    StepScan(InputWorkspace="peak_detector", OutputWorkspace="scan_table")
                    ConvertTableToMatrixWorkspace(InputWorkspace="scan_table", ColumnX=scan_var,
                                                  ColumnY="Counts", ColumnE="Error", OutputWorkspace="USANS_scan_detector")
                    mtd['USANS_scan_detector'].getAxis(1).getUnit().setLabel("Counts", "Counts")
                    x_data = mtd["USANS_scan_detector"].readX(0)
                    y_data = mtd["USANS_scan_detector"].readY(0)
                    e_data = mtd["USANS_scan_detector"].readE(0)

                    if i == 0:
                        file_path = os.path.join(outdir, "%s_detector_%s.txt" % (file_prefix, main_wl))
                        SaveAscii(InputWorkspace="USANS_scan_detector",Filename=file_path, WriteSpectrumID=False)
                        #json_file_path = os.path.join(outdir, "%s_plot_data.json" % file_prefix)
                        #SavePlot1DAsJson(InputWorkspace="USANS_scan_detector", JsonFilename=json_file_path, PlotName="main_output")
                        from postprocessing.publish_plot import plot1d
                        plot1d(run_number, [[x_data, y_data, e_data]], instrument='USANS', 
                               x_title=scan_var, y_title="Counts", y_log=True)

                        # Save scan info to use for stitching later
                        update_sequence_info(os.path.join(outdir, "scan_%s.json" % sequence_first_run),
                                             {run_number: {'iq':file_path} })
                        
                        for i_theta in range(len(x_data)):
                            # The scan angle is in arcseconds: convert to radians,
                            # then apply the small-angle form Q = 2*pi*sin(theta)/lambda
                            q = 2.0*math.pi*math.sin(x_data[i_theta]*math.pi/180.0/3600.0)/wavelength[main_index]
                            #if q<=0:
                            #    continue
                            
                            # Write I(q): intensity normalized by the monitor; the error
                            # term assumes Poisson statistics on the monitor (dm = sqrt(m))
                            i_q = y_data[i_theta]/y_monitor[i_theta]
                            di_q = math.sqrt( (e_data[i_theta]/y_monitor[i_theta])**2 + y_data[i_theta]**2/y_monitor[i_theta]**3)
                            iq_fd_simple.write("%-10.6g %-10.6g %-10.6g\n" % (q, i_q, di_q))
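
A quick hedged check of the angle-to-Q conversion used above, with made-up
numbers (a 100 arcsecond scan angle and a 3.6 Angstrom wavelength):

import math

theta_arcsec = 100.0                                  # hypothetical scan angle
wavelength = 3.6                                      # hypothetical, in Angstrom
theta_rad = theta_arcsec * math.pi / 180.0 / 3600.0   # arcseconds -> radians
q = 2.0 * math.pi * math.sin(theta_rad) / wavelength  # Q in 1/Angstrom
print("Q = %g 1/Angstrom" % q)                        # ~8.5e-4 for these numbers
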
Example 5
    y = reflectivity.readY(0)
    dy = reflectivity.readE(0)
    dx = reflectivity.readDx(0)

    if int(run_number) - first_run_of_set < 10:
        for r in range(0, 10):
            reduced_file_name = 'REFL_%s_%s_%s_auto.nxs' % (
                first_run_of_set, r + 1, first_run_of_set + r)
            reduced_file_path = os.path.join(output_dir, reduced_file_name)
            if os.path.isfile(reduced_file_path):
                # Look to see whether submitting the plot is enabled
                if plotting_ready:
                    plot1d(first_run_of_set + r, [[x, y, dy, dx]],
                           instrument='REF_L',
                           x_title=u"Q (1/A)",
                           x_log=True,
                           y_title="Reflectivity",
                           y_log=True,
                           show_dx=False)
                else:
                    plot_div = plot1d(first_run_of_set + r, [[x, y, dy, dx]],
                                      instrument='REF_L',
                                      x_title=u"q (1/A)",
                                      x_log=True,
                                      y_title="Reflectivity",
                                      y_log=True,
                                      show_dx=False,
                                      publish=False)
                    publish_plot('REF_L',
                                 first_run_of_set + r,
                                 files={'file': plot_div})
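
Note the two-step pattern in the else branch: with publish=False, plot1d
returns the rendered figure (a plot div) instead of posting it, and
publish_plot then submits it explicitly. A minimal sketch of the same pattern,
with a hypothetical run number and the data arrays read above:

# Hypothetical run number; render first, then publish as an explicit step.
div = plot1d(190001, [[x, y, dy, dx]], instrument='REF_L',
             x_title=u"Q (1/A)", x_log=True,
             y_title="Reflectivity", y_log=True,
             show_dx=False, publish=False)
if div is not None:
    publish_plot('REF_L', 190001, files={'file': div})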