#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 21 15:39:33 2022

@author: badler

This script creates housekeeping plots for the ASSIST.
This includes
- Timeseries plots of certain variables
- dashboard for camera images
- Dashboard display traffic light design and recent camera image
- traffic light hovmoeller for last day, month, 3 months
"""
import os
import glob
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib
import string
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import gridspec
from cycler import cycler
from scipy.interpolate import interp1d
from metpy.units import units
import metpy.calc as mpcalc
import datetime
import numpy as np
import pandas as pd
from pandas.tseries.frequencies import to_offset
import xarray as xr
from netCDF4 import Dataset
import scipy.io as sio
import shutil
import zipfile

# plot settings
params = {'legend.fontsize': 'xx-large',
          'figure.figsize': (15, 5),
          'axes.labelsize': 'x-large',
          'axes.titlesize': 'xx-large',
          'xtick.labelsize': 'x-large',
          'ytick.labelsize': 'x-large',
          'xtick.minor.size': 3.5,
          'xtick.top': True,
          'ytick.right': True}
plt.rcParams.update(params)
default_cycler = cycler(color=plt.cm.nipy_spectral(np.linspace(0, 1, 15)))
plt.rc('axes', prop_cycle=default_cycler)

# some general definitions
# fixed color order used for the housekeeping time-series panels
tsdefcolor = ['black', 'indianred', 'forestgreen', 'royalblue', 'mediumpurple',
              'magenta', 'cyan', 'lime', 'orange', 'yellow', 'red']
tscycler = cycler(color=tsdefcolor)
dpi = 50

JULIAN_OFFSET = 1721424.5  # Julian date at 0000-12-31
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())


# some functions
def datenum_to_datetime(datenum):
    """
    Convert a Matlab datenum into a Python datetime.

    from: https://gist.github.com/victorkristof/b9d794fe1ed12e708b9d

    :param datenum: Date in datenum format
    :return: Datetime object corresponding to datenum.
    """
    # Matlab's day ordinal is 366 days ahead of Python's proleptic ordinal
    days = datenum % 1
    return (datetime.datetime.fromordinal(int(datenum))
            + datetime.timedelta(days=days)
            - datetime.timedelta(days=366))


def datestr2num(x, str_format='%Y%m%d'):
    """
    Convert a date string to a Matplotlib date number.

    :param x: date string
    :param str_format: strptime format of x (default '%Y%m%d')
    :return: Matplotlib date number (float days since the mdates epoch)
    """
    # default-argument signature replaces the previous *args dispatch;
    # all call sites pass the format positionally, so this is compatible
    return mdates.date2num(datetime.datetime.strptime(x, str_format))


def num2datestr(x, str_format='%Y%m%d%H%M%S'):
    """
    Convert a Matplotlib date number to a formatted date string.

    :param x: Matplotlib date number
    :param str_format: strftime format of the result (default '%Y%m%d%H%M%S')
    :return: formatted date string
    """
    return mdates.num2date(x).strftime(str_format)


def num2julian(n):
    """
    Convert a Matplotlib date (or sequence) to a Julian date (or sequence).

    Parameters
    ----------
    n : float or sequence of floats
        Matplotlib dates (days relative to `.get_epoch`).

    Returns
    -------
    float or sequence of floats
        Julian dates (days relative to 4713 BC Jan 1, 12:00:00).
    """
    ep = np.datetime64(mdates.get_epoch(), 'h').astype(float) / 24.
    ep0 = np.datetime64('0000-12-31T00:00:00', 'h').astype(float) / 24.
    # Julian offset defined above is relative to 0000-12-31, but we need
    # relative to our current epoch:
    dt = JULIAN_OFFSET - ep0 + ep
    return np.add(n, dt)  # Handles both scalar & nonscalar j.
def get_ticks(start, end): from datetime import timedelta as td delta = end - start if delta <= td(days=1.5): loc = mdates.HourLocator(byhour=range(0,24,6)) fmt = mdates.DateFormatter('%H:%M\n%m/%d') elif delta <= td(days=3): loc = mdates.HourLocator(byhour=range(0,24,12)) fmt = mdates.DateFormatter('%m/%d %H') elif delta <= td(days=15): loc = mdates.DayLocator() fmt = mdates.DateFormatter('%m/%d') elif delta <= td(days=30): loc = mdates.DayLocator(interval=2) fmt = mdates.DateFormatter('%m/%d') elif delta <= td(days=50): loc = mdates.DayLocator(interval=5) fmt = mdates.DateFormatter('%m/%d') elif delta <= td(days=100): loc = mdates.DayLocator(interval=10) fmt = mdates.DateFormatter('%m/%d') else: loc = mdates.DayLocator(interval=20) fmt = mdates.DateFormatter('%m/%d') return loc,fmt def calc_pressureprofile(p0,temp,height): #function to calculate pressure profile from surface pressure # and temperature profiles using barometric height formulas #temperature in K, pressure in hPa, output in hPa pres=np.full(temp.shape,np.nan) pres[0] = p0 for z in range(1,len(height)): pres[z] = pres[z-1]*np.exp(-9.81*(height[z]-height[z-1])/(287 * np.nanmean([temp[z],temp[z-1]]))) return pres def calc_theta(temp,pres): #function to calculate potential tempeature difference #temperature in K, pressure in hPa theta = temp*(1000./pres)**0.2854 return theta def get_nearest(refa,mya): #function to get idx of values in mya which are closest to values in refa idx=abs(refa[:,None]-mya[None,:]).argmin(axis=-1) return idx def vaporpressure(temp): #calculates vapor pressure over a flat water surface #if the input temperature is the air temperature it returns # saturation vapor pressure #if input is dew point temperature #it returns actual vapor pressure #tempeature in celsius wv = 6.112*np.exp(17.62*temp/(243.12+temp)) return wv def calc_mixingratio(e,p): #e: in hPa, p in hpa #return mr in kg/kg mr=0.622*(e/(p-e)) return mr def calc_absolutehumidity(e,temp): #function to calculate absolute 
humidity # from actual water vapour e (hPa) and air temperature (C) #output in g/m^3 a = 100.*e/(461.52*(temp+273.15))*1000 return a def calc_iwv(abshum,height): #compute iwv from profile, abshum in g/m3 and height in m #compute mean of abshum for height layers #previous method, abshum and diff(hegiht) don't have the same shape! # abshummean=moving_average(abshum,2) # iwv=np.nansum(abshummean*np.diff(height))/1000 df=pd.DataFrame(np.transpose(abshum/1000)) dfmean=df.rolling(2).mean() amean=np.transpose(dfmean.to_numpy()[1:,:]) iwv=np.nansum(amean*np.diff(height),axis=1) return iwv def namestr(obj, namespace): return [name for name in namespace if namespace[name] is obj][0] def read_met_from_ascii(siteid,datestring): ##%% fpathin='/psd2data/obs/realtime/CsiDatalogger/SurfaceMet/'+siteid julday=int(num2julian(datestr2num(datestring))-num2julian(datestr2num(str(int(datestring[0:4])-1)+'12'+'31'))) julday=str(julday).zfill(3) year=datestring[:4] l_ = sorted(glob.glob(os.path.join(fpathin,\ year,julday,siteid+'*'+year[2:4]+julday+'.*m'))) # l=glob.glob(os.path.join(devicedir,statid+'*'+rass+'*'+d+'*.cdf')) if len(l_): print('process met data on '+ datestring) header=['id','year','jday','hmin','pressure','temperature','relative_humidity','wind_speed','vecwind_speed',\ 'wind_direction','stdwind_direction','battery_voltage','maxwind_speed'] IN={} for field in header: IN[field]=[] for l in range(0,len(l_)): print('read '+l_[l]) with open(l_[l]) as f: flist=list(f) fsplit=[x.split(',') for x in flist] ffloat=list(np.float_(fsplit)) data=np.asarray(ffloat) if data.shape[1]0: p.set_marker(markers[i_varsall]) p.set_linestyle('None') ax.grid('on') try: ax.set_ylim(ylims[i_varsall]) except: print('no ylim defined for subplot '+str(i_varsall)) ax.set_ylabel(ylabelstr[i_varsall]) lines,labels=ax.get_legend_handles_labels() lines=np.atleast_1d(lines) if len(lines)>8: ax.legend(fontsize='medium',ncol=int(len(lines)/3.)) elif len(lines)==1: ax.legend(fontsize='medium',ncol=1) elif 
len(lines)==0:
            # no legend handles -> skip legend (this clause completes the
            # "elif" chain of the head-truncated plot_timeseries_nativeres)
            print('do not try to plot legend')
        else:
            ax.legend(fontsize='medium',ncol=int(len(lines)/2.))
        ax.set_xlim(xlim[0],xlim[1])
        loc,fmt=get_ticks(mdates.num2date(ax.get_xlim()[0]),mdates.num2date(ax.get_xlim()[1]))
        ax.xaxis.set_major_locator(loc)
        ax.xaxis.set_major_formatter(fmt)
        if drange==1:
            # hourly minor ticks only for the one-day plot
            locminor=mdates.HourLocator()
            ax.xaxis.set_minor_locator(locminor)
    ax.set_xlabel('Time (UTC)')
    axs[0].set_title(datestring+', '+site+' '+assist)
    # NOAA logo in the lower-left figure corner
    logo=fig.add_axes([0.1,0.04,0.05,0.05],anchor='NW',zorder=1)
    im=plt.imread(os.path.join(funcdir,'noaa_web.png'))
    logo.imshow(im)
    logo.axis('off')
    plt.text(0.9, 0.05, 'ASSIST housekeeping'+\
        '\nContact: Bianca.Adler@noaa.gov, Creation date: '+datetime.datetime.now(datetime.UTC).strftime('%d %b %Y %H:%M UTC'), \
        fontsize=14, transform=plt.gcf().transFigure,\
        ha='right')
    print('save as '+fout)
    plt.savefig(fout,format='png',dpi=dpi,bbox_inches='tight')

def plot_timeseries_dailystats(data,pspecs,drange):
    """
    Plot time series of daily statistics (mean+-std or percentage counts)
    for the variable groups in pspecs, one subplot per group.

    NOTE(review): despite the `data` argument, the plotted datasets
    (assistsumplot, assistnfchAplot, metpslplot, ceilplot) and xlim,
    datestring, site, assist, funcdir, tscycler, dpi are read from module
    globals -- confirm before refactoring.

    :param data: dict of plot-ready datasets (currently unused directly)
    :param pspecs: dict with keys 'varsall','ylabelstr','varsdiff','ylims',
        'dailystatstype','markers','fout'
    :param drange: day range of the plot window (appended to file name)
    """
    ##%%
    varsall=pspecs['varsall']
    ylabelstr=pspecs['ylabelstr']
    varsdiff=pspecs['varsdiff']
    ylims=pspecs['ylims']
    dailystatstype=pspecs['dailystatstype']
    markers=pspecs['markers']
    fout=pspecs['fout']+'_'+str(drange)+'.png'
    fig,axs = plt.subplots(nrows=len(varsall),ncols=1,figsize=(13.5,3*len(varsall)),\
        facecolor='w',edgecolor='w',gridspec_kw={'hspace': 0.07,'wspace': 0.00},\
        tight_layout={'pad':0},sharex=True)
    axs=np.atleast_1d(axs)
    for i_varsall in range(0,len(varsall)):
        vars_=varsall[i_varsall]
        if type(axs) is np.ndarray:
            ax=axs[i_varsall]
        else:
            ax=axs
        ax.set_prop_cycle(tscycler)
        for i_vars in range(0,len(vars_)):
            var=vars_[i_vars]
            print('plot '+var)
            try:
                if varsdiff[i_varsall]==1:
                    # difference plots: daily mean of pairwise differences
                    # between the assistsum variables of this group
                    if 'assistsum' in var:
                        fvar=var[:var.index('assist')]
                        varsd_=[v for v in vars_ if not v in var]
                        for i_varsd in range(len(varsd_)):
                            vard=varsd_[i_varsd]
                            fvard=vard[:vard.index('assist')]
                            plotvar=assistsumplot[fvar]-assistsumplot[fvard]
                            plotvarmain=plotvar.resample(time='1D').mean(dim='time')
                            plotvarstd=plotvar.resample(time='1D').std(dim='time')
                            plotdt=mdates.date2num(plotvarmain['time'].values)
                            labelstr=fvar+'-'+fvard
                            ax.plot(plotdt,plotvarmain,'o',label=labelstr)
                            if 'plotvarstd' in locals():
                                # shade +-1 std around the daily mean
                                ax.fill_between(plotdt,plotvarmain-plotvarstd,plotvarmain+plotvarstd,alpha=0.3)
                                del plotvarstd
                    else:
                        print('ERROR: add instrument for difference plots')
                else:
                    # absolute values: the variable-name suffix selects the
                    # source dataset (assistsum / nfch1 / met / ceilometer)
                    if 'assistsum' in var:
                        fvar=var[:var.index('assist')]
                        labelstr=fvar
                        plotvar=assistsumplot[fvar]
                    elif 'assistnfch1' in var:
                        fvar=var[:var.index('assist')]
                        labelstr=fvar
                        plotvar=assistnfchAplot[fvar]
                    elif 'metpsl' in var:
                        fvar=var[:var.index('metpsl')]
                        plotvar=metpslplot[fvar]
                        labelstr='MET PSL'
                    elif 'ceilo' in var:
                        fvar=var[:var.index('ceilo')]
                        plotvar=ceilplot[fvar]
                        labelstr='Ceilo'
                    if dailystatstype[i_varsall] == 'mean':
                        plotvarmain=plotvar.resample(time='1D').mean(dim='time')
                        plotvarstd=plotvar.resample(time='1D').std(dim='time')
                    elif dailystatstype[i_varsall].startswith('count'):
                        # 'counteqX'/'countgtX': daily percentage of samples
                        # equal to / greater than X (X parsed from char 7 on)
                        if 'eq' in dailystatstype[i_varsall]:
                            mask= (plotvar == float(dailystatstype[i_varsall][7:])).compute()
                        elif 'gt' in dailystatstype[i_varsall]:
                            mask= (plotvar > float(dailystatstype[i_varsall][7:])).compute()
                        plotvarmain=plotvar[mask].resample(time='1D').count(dim='time')/\
                            plotvar.resample(time='1D').count(dim='time')*100
                        plotdt=mdates.date2num(plotvarmain['time'].values)
                        plotdtadd=plotvar.resample(time='1D').mean(dim='time').time
                        plotvaradd=plotvar.resample(time='1D').count(dim='time')/\
                            plotvar.time.resample(time='1D').count(dim='time')*100
                        #only plot values until datestring
                        tidx=np.where(plotdtadd['time.date']<=datetime.datetime.strptime(datestring,'%Y%m%d').date())[0]
                        ax.plot(plotdtadd[tidx],plotvaradd[tidx],label='avail. samples',marker='o')
                        # ax.plot(plotvar.resample(time='1D').mean(dim='time').time,plotvar.resample(time='1D').count(dim='time')/\
                        #     plotvar.time.resample(time='1D').count(dim='time')*100,\
                        #     label='avail. samples',marker='o')
                        # ylabelstr[i_varsall]=ylabelstr[i_varsall]+' in %'
                    plotdt=mdates.date2num(plotvarmain['time'].values)
                    p,=ax.plot(plotdt,plotvarmain,marker='o',label=labelstr)
                    if 'plotvarstd' in locals():
                        ax.fill_between(plotdt,plotvarmain-plotvarstd,plotvarmain+plotvarstd,alpha=0.3)
                        del plotvarstd
            except:
                # broad except keeps one failing variable from killing the
                # whole dashboard figure
                print('ERROR')
        ax.grid('on')
        try:
            ax.set_ylim(ylims[i_varsall])
        except:
            print('no ylim defined for subplot '+str(i_varsall))
        ax.set_ylabel(ylabelstr[i_varsall])
        lines,labels=ax.get_legend_handles_labels()
        lines=np.atleast_1d(lines)
        if len(lines)>8:
            ax.legend(fontsize='medium',ncol=int(len(lines)/3.))
        elif len(lines)==1:
            ax.legend(fontsize='medium',ncol=1)
        elif len(lines)==0:
            print('do not try to plot legend')
        else:
            ax.legend(fontsize='medium',ncol=int(len(lines)/2.))
        ax.set_xlim(xlim[0],xlim[1])
        loc,fmt=get_ticks(mdates.num2date(ax.get_xlim()[0]),mdates.num2date(ax.get_xlim()[1]))
        ax.xaxis.set_major_locator(loc)
        ax.xaxis.set_major_formatter(fmt)
        if drange==1:
            locminor=mdates.HourLocator()
            ax.xaxis.set_minor_locator(locminor)
    ax.set_xlabel('Time (UTC)')
    axs[0].set_title(datestring+', '+site+' '+assist)
    logo=fig.add_axes([0.1,0.04,0.05,0.05],anchor='NW',zorder=1)
    im=plt.imread(os.path.join(funcdir,'noaa_web.png'))
    logo.imshow(im)
    logo.axis('off')
    plt.text(0.9, 0.05, 'ASSIST housekeeping'+\
        '\nContact: Bianca.Adler@noaa.gov, Creation date: '+datetime.datetime.now(datetime.UTC).strftime('%d %b %Y %H:%M UTC'), \
        fontsize=14, transform=plt.gcf().transFigure,\
        ha='right')
    plt.savefig(fout,format='png',dpi=dpi,bbox_inches='tight')

#%%
#get some global variables
# configuration comes entirely from environment variables set by the caller
campaigndir=os.getenv('campaigndir')
funcdir=os.getenv('funcdir')
rawceildir=os.getenv('rawceildir')
rawassistdir=os.getenv('rawirsdir')
datestring=os.getenv('pdate')
site=os.getenv('site')
# oversion=os.getenv('oversion')
processtype=os.getenv('processtype')
fpathout=os.path.join(campaigndir,'quicklooks_irs')
if not os.path.isdir(fpathout):
    os.makedirs(fpathout)
if processtype == 'reprocess':
    #for processing at NOAA lab, day at midnight
# reprocessing: treat the full processed day as available
    statustime=datetime.datetime.strptime(datestring+'000000','%Y%m%d%H%M%S')+datetime.timedelta(days=1)
    yesterday = (statustime-pd.to_timedelta('2D')).strftime('%Y%m%d')
else:
    #time to process, for realtime processing several times a day on site this is current time
    statustime=datetime.datetime.now()
    yesterday = (statustime-pd.to_timedelta('1D')).strftime('%Y%m%d')
#for testing, create time yourself
#statustime=datetime.datetime.strptime(datestring+'030000','%Y%m%d%H%M%S')
#statustime=datetime.datetime.strptime(datestring+'235900','%Y%m%d%H%M%S')
# if yesterday is processed do full day
if yesterday == datestring:
    statustime=datetime.datetime.strptime(datestring+'000000','%Y%m%d%H%M%S')+datetime.timedelta(days=1)
#midnight of following day, needed to determine when long timeseries are going to be plotted
#midnight=datetime.datetime.combine(statustime.date()+datetime.timedelta(days=1),datetime.time())
#midnight of same day
midnight=datetime.datetime.combine(statustime.date()+datetime.timedelta(days=0),datetime.time())
# sdate=pd.to_datetime(datestring)
plotinfo={}
#%%
#MET data from PSL tower
#try to load last 90 days
if 'OUT' in locals():
    del OUT
# 90-day lookback when a full day (or the first of a month) is processed,
# otherwise just 2 days
if datestring == yesterday :
    daynumber=90
elif datestring[-2:] == '01':
    daynumber=90
else:
    daynumber=2
for d in range(daynumber,-1,-1):
    cday=datetime.datetime.strftime(datetime.datetime.strptime(datestring,'%Y%m%d')-datetime.timedelta(days=d),'%Y%m%d')
    listdir = sorted(glob.glob(os.path.join(campaigndir,'met','*'+cday+'*.nc')))
    if len(listdir) == 0:
        listdir = sorted(glob.glob(os.path.join(campaigndir,'met','*'+cday+'*.cdf')))
    if len(listdir) == 0:
        print('no netcdf MET data found for '+cday)
    else:
        print('read '+listdir[0])
        IN=xr.open_dataset(listdir[0])
        #dt=[datetime.datetime.fromtimestamp(t,datetime.UTC) for t in IN['base_time'].values+IN['time_offset'].values]
        dt=pd.to_datetime(IN.base_time.values+IN.time_offset.values,unit='s')
        IN['time']=dt
        # harmonize variable names across the different MET file flavors
        if 'atmosphericPressure' in IN.keys():
            IN['atmos_pressure']=IN.atmosphericPressure
        elif 'pressure' in IN.keys():
            IN['atmos_pressure']=IN.pressure
        if 'temperature' in IN.keys():
            IN['temp_mean']=IN['temperature']
        if 'relativeHumidity' in IN.keys():
            IN['rh_mean']=IN['relativeHumidity']
        elif 'relative_humidity' in IN.keys():
            IN['rh_mean']=IN['relative_humidity']
        IN['atmos_pressure']=IN['atmos_pressure']*10 #is in kPa
        IN['tempabs_mean']=IN['temp_mean']+273.15
        #compute water vapor mixing ratio
        E=vaporpressure(IN.temp_mean)
        e=IN.rh_mean/100*E
        IN['mr_mean']=calc_mixingratio(e,IN.atmos_pressure)
        #convert to g/kg
        IN['mr_mean']=IN['mr_mean']*1000
        if not 'OUT' in locals():
            OUT=IN
        else:
            OUT=xr.concat([OUT,IN], dim='time')
if 'OUT' in locals():
    metpsl=OUT.copy(deep=True)
#%%
#ceilometer data
if 'OUT' in locals():
    del OUT
if datestring == yesterday:
    daynumber=90
elif datestring[-2:] == '01':
    daynumber=90
else:
    daynumber=2
for d in range(daynumber,-1,-1):
    cday=datetime.datetime.strftime(datetime.datetime.strptime(datestring,'%Y%m%d')-datetime.timedelta(days=d),'%Y%m%d')
    listdir = sorted(glob.glob(os.path.join(campaigndir,'cbh','*'+cday+'*.cdf')))
    if len(listdir) == 0:
        print('no ceilometer data ending in cdf found for '+cday)
        listdir = sorted(glob.glob(os.path.join(campaigndir,'cbh','*'+cday+'*.nc')))
        if len(listdir)==0:
            print('no ceilometer data ending in nc found for '+cday)
            continue
    print('read '+listdir[0])
    IN=xr.open_dataset(listdir[0])
    if 'time_offset' in IN.keys():
        dt=pd.to_datetime(IN.base_time.values+IN.time_offset.values,unit='s')
    else:
        dt=pd.to_datetime(IN.base_time.values+IN.time.values,unit='s')
    #dt=[datetime.datetime.fromtimestamp(t,datetime.UTC) for t in IN['base_time'].values+IN['time_offset'].values]
    IN['time']=dt
    if 'cloudHeight' in IN.keys():
        cbhvar='cloudHeight'
    elif 'first_cbh' in IN.keys():
        cbhvar='first_cbh'
    # NOTE(review): if the units attribute exists but is not 'm', udiv is
    # never assigned here and a stale value from a previous loop iteration
    # (or a NameError) results -- confirm intended behavior.
    try:
        if IN[cbhvar].units == 'm':
            unit='m'
            udiv=1000
    except:
        print('no info on ceilo cbh unit,assume it is km')
        udiv=1
    # -999 is the ceilometer missing-value flag
    IN[cbhvar]= xr.where(IN[cbhvar]==-999,np.nan,IN[cbhvar])
    IN['cbh']=IN[cbhvar]/udiv
    # IN['cbh'][IN['cbh']<0]=np.nan
    if not 'OUT' in locals():
        OUT=IN
    else:
        OUT=xr.concat([OUT,IN], dim='time')
if 'OUT' in locals():
    ceil=OUT.copy()
#%%
#assist camera images (only when running script on site)
listdircam = sorted(glob.glob(os.path.join(rawassistdir,'camera',datestring,'*.jpg')))
if len(listdircam)==0:
    #try to unzip
    listdircamzip = sorted(glob.glob(os.path.join(rawassistdir,'camera',datestring,'*.zip')))
    if len(listdircamzip) == 0:
        # NOTE(review): '' (str) here vs. list elsewhere; later code only
        # checks len(listdircam)==0, which still works -- confirm.
        listdircam = ''
    else:
        with zipfile.ZipFile(listdircamzip[0], 'r') as zip_ref:
            zip_ref.extractall(os.path.join(rawassistdir,'camera',datestring))
        listdircam = sorted(glob.glob(os.path.join(rawassistdir,'camera',datestring,'*.jpg')))
#%%
#read all days
listdir = sorted(glob.glob(os.path.join(rawassistdir,'SUMMARY','assistsummary*.cdf')))
if len(listdir) ==0:
    print('no ASSIST sum data found at all')
else:
    #and limit last 90 days
    # ldates=[pd.to_datetime(l[l.index('assistsummary')+14:l.index('assistsummary')+22]) for l in listdir]
    # file date comes from the second dot-separated field of the file name
    ldates=[datetime.datetime.strptime(l.split('/')[-1].split('.')[1],'%Y%m%d') for l in listdir]
    # read last 90 days because yeesterday is processed
    # NOTE(review): the next statement is garbled in this copy of the file
    # ("(ldates[i]statustime..."); the elif/else branches of the date filter
    # appear to have been lost. Reproduced verbatim.
    if datestring == yesterday:
        listdir=[listdir[i] for i in range(len(ldates)) \
            if (ldates[i]>statustime-datetime.timedelta(days=90)) & \
            (ldates[i]statustime-datetime.timedelta(days=90)) & \
            (ldates[i]statustime-datetime.timedelta(days=2)) & \
            (ldates[i]<=statustime)]
    if 'OUT' in locals():
        del OUT
    for l in listdir:
        print('read '+l)
        IN=xr.open_dataset(l,chunks={})
        dt=pd.to_datetime(IN.base_time.values+IN.time.values,unit='ns')
        #bt=datetime.datetime.fromtimestamp((IN.base_time.values/1e9).astype(int),datetime.UTC)
        #dt=pd.Timestamp(bt)+IN.time
        IN['time']=dt
        #time stamps are not always sorted!
# --- continuation of the assistsummary ingest loop started above ---
        IN=IN.sortby('time')
        if not 'OUT' in locals():
            OUT=IN
        else:
            OUT=xr.concat([OUT,IN], dim='time')
    #for correct time unit
    OUT['time'] = pd.DatetimeIndex(OUT['time'].values)
    #some computations
    # OUT['LWresponsivity_max']=OUT['LWresponsivity'].max()
    # normalize responsivities by their 99th percentile over the loaded period
    OUT['LWresponsivity_max']=OUT.LWresponsivity.chunk(dict(time=-1)).quantile(0.99,dim='time')
    OUT['LWresponsivity_relative']=OUT['LWresponsivity']/OUT['LWresponsivity_max'].values
    OUT['SWresponsivity_max']=OUT.SWresponsivity.chunk(dict(time=-1)).quantile(0.99,dim='time')
    OUT['SWresponsivity_relative']=OUT['SWresponsivity']/OUT['SWresponsivity_max'].values
    OUT['resp1000_max']=OUT.resp1000.chunk(dict(time=-1)).quantile(0.99,dim='time')
    OUT['resp1000_relative']=OUT['resp1000']/OUT['resp1000_max'].values
    OUT['resp2500_max']=OUT.resp2500.chunk(dict(time=-1)).quantile(0.99,dim='time')
    OUT['resp2500_relative']=OUT['resp2500']/OUT['resp2500_max'].values
    # map rain sensor strings to numeric flags: Clear->1, Raining/R->0
    hr=OUT.hatchRainSensor.str.replace('Clear','1')
    hr=hr.str.replace('Raining','0').str.replace('R','0')
    OUT['hatchRainSensor']=hr.astype(float)
    OUT['atmosphericPressure']=OUT['atmosphericPressure']*10
    OUT['externalTemperatureabs']=OUT['externalTemperature']+273.15
    assistsum=OUT.copy()
#ASSIST nfch1 files
##%%
#read all days, I only need sceneMirrorAngle
#listdir = sorted(glob.glob(os.path.join(rawassistdir,'SUMMARY','*ChA*.cdf')))
listdir = sorted(glob.glob(os.path.join(campaigndir,'irs_nfch1','*ChA*.cdf')))
# get assistname from chA files
# NOTE(review): this indexes listdir[0] before the len(listdir)==0 check
# below -- raises IndexError when no ChA files exist; confirm intent.
fname = listdir[0].split('/')[-1]
assist = 'ASSIST'+fname[fname.index('assistNo')+8:fname.index('assistNo')+10]
if len(listdir) ==0:
    print('no ASSIST chA data found for '+datestring)
else:
    #and limit to all days before processed day
    # ldates=[pd.to_datetime(l[l.index('assistChA')+10:l.index('assistChA')+19]) for l in listdir]
    ldates=[]
    for l in listdir:
        ldates_=datetime.datetime.strptime(l.split('/')[-1].split('.')[1],'%Y%m%d')
        ldates.append(ldates_)
    # read last 90 days because yeesterday is processed
    # NOTE(review): as for the summary files above, the date filter below is
    # garbled in this copy ("(ldates[i]statustime..."); reproduced verbatim.
    if datestring == yesterday:
        listdir=[listdir[i] for i in range(len(ldates)) \
            if (ldates[i]>statustime-datetime.timedelta(days=90)) & \
            (ldates[i]statustime-datetime.timedelta(days=90)) & \
            (ldates[i]statustime-datetime.timedelta(days=2)) & \
            (ldates[i]<=statustime)]
    if 'OUT' in locals():
        del OUT
    for l in listdir:
        print('read '+l)
        IN=xr.open_dataset(l)
        # keep only what is needed; full spectra make the dataset too big
        IN = IN[['sceneMirrorAngle','base_time','time']]
        #bt=datetime.datetime.fromtimestamp((IN.base_time.values/1e3).astype(int),datetime.UTC)
        #dt=bt+pd.to_timedelta(IN.time,unit='s')
        dt=pd.to_datetime(IN.base_time.values/1e3+IN.time.values.astype(np.float64),unit='s')
        IN['time']=dt
        #drop all varibales with wnum as dimension (dataset gets too big otherwise)
        #IN=IN.drop_dims('wnum')
        #time stamps are not always sorted!
        IN=IN.sortby('time')
        if not 'OUT' in locals():
            OUT=IN
        else:
            OUT=xr.concat([OUT,IN], dim='time')
    #for correct time unit
    OUT['time'] = pd.DatetimeIndex(OUT['time'].values)
    assistnfchA=OUT.copy()
    #store the spectrum for the current day only
    IN=xr.open_dataset(listdir[-1],chunks={})
    bt=datetime.datetime.fromtimestamp((IN.base_time.values/1e3).astype(int),datetime.UTC)
    dt=bt+pd.to_timedelta(IN.time,unit='s')
    IN['time']=dt
    #for correct time unit
    IN['time'] = pd.DatetimeIndex(IN['time'].values)
    assistnfchAcurrent=IN.copy()
#resample
# NOTE(review): resample_xarray is not defined in the provided copy of this
# file (its definition lies in the missing/truncated region).
print('resample assistsum')
assistsum=resample_xarray(assistsum)
print('resample assistnfchA')
assistnfchA=resample_xarray(assistnfchA)
try:
    print('resample metpsl')
    metpsl=resample_xarray(metpsl)
except:
    print('cannot resample metpsl')
try:
    print('resample ceil')
    ceil=resample_xarray(ceil)
except:
    print('cannot resample ceil')
#%% plot time series with native resolution
# variable names carry a dataset suffix (assistsum/assistnfch1/metpsl/ceilo)
# which plot_timeseries_nativeres uses to pick the source dataset
varsall=[['HBBtopTempassistsum','HBBbottomTempassistsum','HBBapexTempassistsum'],\
    ['ABBtopTempassistsum','ABBbottomTempassistsum','ABBapexTempassistsum'],\
    ['calibrationCBBtempassistsum','calibrationHBBtempassistsum','calibrationAmbientTempassistsum',\
    'frontEndEnclosureTempassistsum','stepMotorTempassistsum',\
    'secondPortBlackBodytempassistsum','interferometerTempassistsum',\
    'ambientAirPowerSupplyTempassistsum','annotatorFtsPlateTempassistsum',\
    'detectorStirlingCoolerBlockTempassistsum','temp_meanmetpsl','externalTemperatureassistsum'],\
    ['interferometerHumidityassistsum','rh_meanmetpsl','externalHumidityassistsum'],\
    ['sceneMirrorAngleassistnfch1'],\
    ['hatchOpenassistsum']]
ylabelstr=[r'$\Delta$ T (°C)',r'$\Delta$ T (°C)','T (°C)','RH (%)','Angle (°)','Hatch open (1)']
#indicate if difference between all var shall be plotted instead of absolute values 1: yes, 0: no
varsdiff=[1,1,0,0,0,0]
ylims=[[-0.1,0.1],[-0.1,0.1],[-10,75],[-5,100],[],[-1.1,1.1]]
markers=['','','','','.','']
fout=os.path.join(fpathout,datestring+'_'+assist+'_fig01_timeseries_range')
set_1={'varsall': varsall,'ylabelstr': ylabelstr,'varsdiff': varsdiff, \
    'ylims': ylims,'markers': markers,'fout':fout}
varsall=[['mean_Tb_675_680assistsum','mean_Tb_985_990assistsum','mean_Tb_2510_2515assistsum','tempabs_meanmetpsl','externalTemperatureabsassistsum'],\
    ['mean_imaginary_rad_675_680assistsum','mean_imaginary_rad_985_990assistsum','mean_imaginary_rad_2510_2515assistsum'],\
    ['cbhceilo'],\
    ['resp1000_relativeassistsum','resp2500_relativeassistsum'],\
    ['LWskyNENassistsum'],['SWskyNENassistsum']]
ylabelstr=['TB (K)','Radiance (RU)','CBH (km)','Rel radiance','Radiance (RU)','Radiance (RU)']
#indicate if difference between all var shall be plotted instead of absolute values 1: yes, 0: no
varsdiff=[0,0,0,0,0,0]
ylims=[[],[-1,1],[0,7],[0.5,1.1],[0,15],[0,1]]
markers=['','','.','','','']
fout=os.path.join(fpathout,datestring+'_'+assist+'_fig02_timeseries_range')
set_2={'varsall': varsall,'ylabelstr': ylabelstr,'varsdiff': varsdiff, \
    'ylims': ylims,'markers':markers,'fout':fout}
plotspecs={'set_1': set_1,'set_2':set_2}
# plot 1- and 10-day windows for a completed day, otherwise just 1 day
if datestring == yesterday:
    drange_=[1,10]
else:
    drange_=[1]
# drange_=[90]
for drange in drange_:
    xlim=[mdates.date2num(statustime-datetime.timedelta(days=drange)),\
        mdates.date2num(statustime)]
# --- continuation: per-range subsetting for the native-resolution plots ---
    data={}
    assistsumplot=assistsum.sel(time=slice(mdates.num2date(xlim[0]).replace(tzinfo=None),mdates.num2date(xlim[1]).replace(tzinfo=None)))
    data['assistsumplot']=assistsumplot
    tidx=np.where((assistnfchA.time>=np.datetime64(mdates.num2date(xlim[0]).replace(tzinfo=None))) & \
        (assistnfchA.time<=np.datetime64(mdates.num2date(xlim[1]).replace(tzinfo=None))))[0]
    assistnfchAplot=assistnfchA.isel(time=tidx)
    data['assistnfchAplot']=assistnfchAplot
    try:
        tidx=np.where((metpsl.time>=np.datetime64(mdates.num2date(xlim[0]).replace(tzinfo=None))) & \
            (metpsl.time<=np.datetime64(mdates.num2date(xlim[1]).replace(tzinfo=None))))[0]
        metpslplot=metpsl.isel(time=tidx)
        data['metpslplot']=metpslplot
    except NameError:
        # metpsl is only defined when MET files were found above
        print('metpsl cannot be plotted, probably because no data exist')
    try:
        tidx=np.where((ceil.time>=np.datetime64(mdates.num2date(xlim[0]).replace(tzinfo=None))) & \
            (ceil.time<=np.datetime64(mdates.num2date(xlim[1]).replace(tzinfo=None))))[0]
        ceilplot=ceil.isel(time=tidx)
        data['ceilplot']=ceilplot
    except NameError:
        print('ceil cannot be plotted, probably because no data exist')
    for pspecs_ in plotspecs.keys():
        pspecs=plotspecs[pspecs_]
        plot_timeseries_nativeres(data,pspecs,drange)
#%%
#plot time series of daily means or counts
varsall=[['HBBtopTempassistsum','HBBbottomTempassistsum','HBBapexTempassistsum'],\
    ['ABBtopTempassistsum','ABBbottomTempassistsum','ABBapexTempassistsum'],\
    ['calibrationCBBtempassistsum','calibrationHBBtempassistsum','calibrationAmbientTempassistsum',\
    'frontEndEnclosureTempassistsum','stepMotorTempassistsum',\
    'secondPortBlackBodytempassistsum','interferometerTempassistsum',\
    'ambientAirPowerSupplyTempassistsum','annotatorFtsPlateTempassistsum',\
    'detectorStirlingCoolerBlockTempassistsum','temp_meanmetpsl'],\
    ['interferometerHumidityassistsum','rh_meanmetpsl'],\
    ['sceneMirrorAngleassistnfch1'],\
    ['hatchOpenassistsum']]
ylabelstr=[r'$\Delta$ T (°C)',r'$\Delta$ T (°C)','T (°C)','RH (%)','Freq. (%)','Freq. hatch open (%)']
#indicate if difference between all var shall be plotted instead of absolute values 1: yes, 0: no
varsdiff=[1,1,0,0,0,0]
ylims=[[-0.1,0.1],[-0.1,0.1],[0,95],[-2,102],[50,102],[50,102]]
markers=['','','','','.','']
dailystatstype=['mean','mean','mean','mean','counteq0','counteq1']
fout=os.path.join(fpathout,datestring+'_'+assist+'_fig03_timeseriesmean_range')
set_1={'varsall': varsall,'ylabelstr': ylabelstr,'varsdiff': varsdiff, \
    'ylims': ylims,'markers': markers,'fout':fout,\
    'dailystatstype': dailystatstype}
varsall=[['mean_Tb_675_680assistsum','mean_Tb_985_990assistsum','mean_Tb_2510_2515assistsum','tempabs_meanmetpsl'],\
    ['mean_imaginary_rad_675_680assistsum','mean_imaginary_rad_985_990assistsum','mean_imaginary_rad_2510_2515assistsum'],\
    ['cbhceilo'],\
    # NOTE(review): 'resp1000_relativeassistsum' appears twice; fig02 pairs
    # resp1000 with resp2500 -- possible copy/paste typo, confirm intent.
    ['resp1000_relativeassistsum','resp1000_relativeassistsum'],\
    ['LWskyNENassistsum'],['SWskyNENassistsum']]
ylabelstr=['TB (K)','Radiance (RU)','CBH fraction (%)','Rel radiance','Radiance (RU)','Radiance (RU)']
#indicate if difference between all var shall be plotted instead of absolute values 1: yes, 0: no
varsdiff=[0,0,0,0,0,0]
ylims=[[],[-1,1],[-2,102],[0.5,1.1],[0,2.5],[0,0.2]]
markers=['','','.','','','']
dailystatstype=['mean','mean','countgt0','mean','mean','mean']
fout=os.path.join(fpathout,datestring+'_'+assist+'_fig04_timeseriesmean_range')
set_2={'varsall': varsall,'ylabelstr': ylabelstr,'varsdiff': varsdiff, \
    'ylims': ylims,'markers':markers,'fout':fout,\
    'dailystatstype': dailystatstype}
plotspecs={'set_1': set_1,'set_2':set_2}
# plotspecs={'set_1': set_1}
# daily-stats figures only for completed days / first of month (90-day window)
if datestring == yesterday or datestring[-2:]=='01':
    drange_=[90]
#%%
for drange in drange_:
    xlim=[mdates.date2num(statustime-datetime.timedelta(days=drange)),\
        mdates.date2num(statustime)]
    try:
        assistsumplot=assistsum.sel(time=slice(mdates.num2date(xlim[0]).replace(tzinfo=None),mdates.num2date(xlim[1]).replace(tzinfo=None)))
        tidx=np.where((assistnfchA.time>=np.datetime64(mdates.num2date(xlim[0]).replace(tzinfo=None))) & \
            (assistnfchA.time<=np.datetime64(mdates.num2date(xlim[1]).replace(tzinfo=None))))[0]
        assistnfchAplot=assistnfchA.isel(time=tidx)
        tidx=np.where((metpsl.time>=np.datetime64(mdates.num2date(xlim[0]).replace(tzinfo=None))) & \
            (metpsl.time<=np.datetime64(mdates.num2date(xlim[1]).replace(tzinfo=None))))[0]
        metpslplot=metpsl.isel(time=tidx)
        tidx=np.where((ceil.time>=np.datetime64(mdates.num2date(xlim[0]).replace(tzinfo=None))) & \
            (ceil.time<=np.datetime64(mdates.num2date(xlim[1]).replace(tzinfo=None))))[0]
        ceilplot=ceil.isel(time=tidx)
        data={}
        data['assistsumplot']=assistsumplot
        # NOTE(review): key 'assistsumnfchA' differs from the
        # 'assistnfchAplot' key used for the native-resolution plots; the
        # plotting functions read globals, so this may be inconsequential.
        data['assistsumnfchA']=assistnfchAplot
        data['metpslplot']=metpslplot
        data['ceilplot']=ceilplot
    except NameError:
        print('error')
    for pspecs_ in plotspecs.keys():
        pspecs=plotspecs[pspecs_]
        plot_timeseries_dailystats(data,pspecs,drange)
#%%
#status of instrument health
#determine closest assistsummary time
dt=mdates.date2num(assistsum.time)
#drop duplicates
_,idx=np.unique(dt,return_index=True)
assistsum=assistsum.isel(time=idx)
#dropnan
asumfin=assistsum['hatchOpen'].dropna(dim='time')
try:
    missingtime='F'
    # accept a summary sample within 15 min of the status time
    sumtime=asumfin.time.sel(time=statustime,method='nearest',tolerance=np.timedelta64(15,'m'))
except:
    print('no valid ASSIST data found at '+statustime.strftime('%Y%m%d %H:%M:%S'))
    #use last valid time, but indicate in plot
    sumtime=asumfin.time.sel(time=statustime,method='nearest')
    sumtime.values=statustime
    missingtime='T'
#get matching nfch1 values
dt=mdates.date2num(assistnfchAcurrent.time)
#drop duplicates
_,idx=np.unique(dt,return_index=True)
assistnfchAcurrent=assistnfchAcurrent.isel(time=idx)
#zenith only
aidx= np.abs(assistnfchAcurrent.sceneMirrorAngle.values)<1e-5
dummy=assistnfchAcurrent.isel(time=aidx)
anfch1=dummy.sel(time=sumtime,method='nearest')
#compute BB temperature and differences
abbvars=['ABBbottomTemp','ABBtopTemp','ABBapexTemp']
abbtemp_=[]
abbtempdiff_=[]
for var in abbvars:
    varsd_=[v for v in abbvars if not v in var]
    for vard in varsd_:
        # print('difference between '+var +' and '+vard)
# --- continuation: pairwise ABB sensor differences (loop started above) ---
        abbtempdiff_.append(assistsum[var].values-assistsum[vard].values)
    abbtemp_.append(assistsum[var].values)
# mean over the three ABB sensors, and max absolute pairwise difference
assistsum['abbtemp']=('time',np.mean(abbtemp_,axis=0))
assistsum['abbtempdiff']=('time',np.max(np.abs(abbtempdiff_),axis=0))
hbbvars=['HBBbottomTemp','HBBtopTemp','HBBapexTemp']
hbbtemp_=[]
hbbtempdiff_=[]
for var in hbbvars:
    varsd_=[v for v in hbbvars if not v in var]
    for vard in varsd_:
        # print('difference between '+var +' and '+vard)
        hbbtempdiff_.append(assistsum[var].values-assistsum[vard].values)
    hbbtemp_.append(assistsum[var].values)
assistsum['hbbtemp']=('time',np.mean(hbbtemp_,axis=0))
assistsum['hbbtempdiff']=('time',np.max(np.abs(hbbtempdiff_),axis=0))
#create a variable status_sfcsensor that describes the status of the surface met data (values changing/not constant over the past hour
da=np.abs(assistsum.externalTemperature.diff(dim='time'))
dasumtemp=da.rolling(time=300,min_periods=5).sum()
da=np.abs(assistsum.externalHumidity.diff(dim='time'))
dasumrh=da.rolling(time=300,min_periods=5).sum()
# 1 = sensor alive (values changed), 0 = stuck/constant readings
mask=(dasumtemp >1e-5) | (dasumrh > 1e-5)
dasummask=dasumtemp.copy(deep=True)
dasummask[mask.compute()]=1
mask=(dasumtemp <=1e-5) | (dasumrh <= 1e-5)
dasummask[mask.compute()]=0
assistsum['statusextsensor']=dasummask
## define which variables to include
#names to plot
dfstr=pd.DataFrame.from_dict({'a': ['ABB mean temp C ','HBB mean temp C '],\
    'b': ['ABB max temp diff C ','HBB max temp diff C '],\
    'c': ['Surface sensor status ','Step motor temp C '],\
    'd': ['Second port BB temp C ','Ambient power supply \ntemp C '],\
    'e': ['Detector stirling\n cooler block temp C ','Annotator/FTS temp C '],\
    'f': ['Interferometer temp C ','Interferometer \nhumidity % '],\
    'g': ['Hatch status ','Rain sensor '],\
    'h': ['resp1000 relative ','resp2500 relative'],\
    'i': ['LW sky NEN ','SW sky NEN'],\
    })
#names in netcdf file (summary)
varsall=[['abbtemp','hbbtemp'],\
    ['abbtempdiff','hbbtempdiff'],\
    ['statusextsensor','stepMotorTemp'],\
    ['secondPortBlackBodytemp','ambientAirPowerSupplyTemp'],\
    ['detectorStirlingCoolerBlockTemp','annotatorFtsPlateTemp'],\
    ['interferometerTemp','interferometerHumidity'],\
    ['hatchOpen','hatchRainSensor'],\
    ['resp1000_relative','resp2500_relative'],\
    ['LWskyNEN','SWskyNEN'] ,\
    ]
#create dataframe
abc_=list(string.ascii_lowercase)
d_={}
for vars_,abc in zip(varsall,abc_):
    #first drop nan and then search for nearest
    av0=assistsum[vars_[0]].dropna(dim='time')
    av1=assistsum[vars_[1]].dropna(dim='time')
    try:
        d_[abc]=[av0.sel(time=statustime,method='nearest',tolerance=np.timedelta64(15,'m')).values,\
            av1.sel(time=statustime,method='nearest',tolerance=np.timedelta64(15,'m')).values]
    except:
        # no sample within tolerance -> whole table flagged as missing below
        missingtime='T'
        # d_[abc]=[assistsum[vars_[0]].sel(time=statustime,method='nearest',tolerance=np.timedelta64(60,'s')).values,\
        #     assistsum[vars_[1]].sel(time=statustime,method='nearest',tolerance=np.timedelta64(60,'s')).values]
        # d_[abc]=[assistsum[vars_[0]].values[0],assistsum[vars_[1]].values[0]]
dfval=pd.DataFrame.from_dict(d_)
#if time stamp is missing set all values to nan
if missingtime == 'T':
    dfval=dfval*np.nan
    anfch1.mean_rad.values=anfch1.mean_rad.values*np.nan
#take care of digits (columns wise)
dfval_=pd.DataFrame()
dfval_['a']=dfval['a'].apply(lambda x : '{:.2f}'.format(x))
dfval_['b']=dfval['b'].apply(lambda x : '{:.4f}'.format(x))
dfval_['c']=dfval['c'].apply(lambda x : '{:.2f}'.format(x))
dfval_['d']=dfval['d'].apply(lambda x : '{:.2f}'.format(x))
dfval_['e']=dfval['e'].apply(lambda x : '{:.2f}'.format(x))
dfval_['f']=dfval['f'].apply(lambda x : '{:.2f}'.format(x))
dfval_['g']=dfval['g'].apply(lambda x : '{:.0f}'.format(x))
dfval_['h']=dfval['h'].apply(lambda x : '{:.2f}'.format(x))
dfval_['i']=dfval['i'].apply(lambda x : '{:.2f}'.format(x))
#define thresholds
# traffic-light bounds per table row: green (g) and yellow (y) min/max
dfmaxg=pd.DataFrame.from_dict({'a': [45,60.05],\
    'b': [0.105,0.4],\
    'c': [1.1,45],\
    'd': [35,35],\
    'e': [55,40],\
    'f': [35,15],\
    'g': [1.1,1.1],\
    'h': [1,1],\
    'i': [2,0.2],\
    })
dfming=pd.DataFrame.from_dict({'a': [-15,59.95],\
    'b': [0,0],\
    'c': [0.9,-15],\
    'd': [20,20],\
    'e': [35,25],\
    'f': [20,-0.1],\
    'g': [0.9,0.9],\
    'h': [0.85,0.85],\
    'i': [0,0],\
    })
dfmaxy=pd.DataFrame.from_dict({'a': [50,60.1],\
    'b': [0.135,0.5],\
    'c': [1.1,55],\
    'd': [50,55],\
    'e': [65,55],\
    'f': [40,20],\
    'g': [1.1,1.1],\
    # 'g': [1.2,1.2],\
    'h': [1,1],\
    'i': [3,0.3],\
    })
dfminy=pd.DataFrame.from_dict({'a': [-25,59.9],\
    'b': [0,0],\
    'c': [0.9,-25],\
    'd': [10,-10],\
    'e': [30,25],\
    'f': [15,-0.1],\
    # 'g': [0.8,0.8],\
    'g': [0.9,0.9],\
    'h': [0.75,0.75],\
    'i': [0,0],\
    })
#create dataframe for plotting consisting of name and value
dfstrval=dfstr+'\n'+dfval_
#colors for realtime status
dfcol=dfval.copy()
dfcol.loc[:]='limegreen'
# NOTE(review): the two mask expressions below are garbled in this copy of
# the file ("dfvaldfming"/"dfvaldfminy"); the comparisons against the
# green/yellow threshold frames were presumably lost. Reproduced verbatim.
mask=(dfvaldfming)
dfcol=dfcol.where(mask,other='gold')
mask=(dfvaldfminy)
dfcol=dfcol.where(mask,other='orangered')
if missingtime == 'T':
    dfcol[:]='magenta'
#%%
#plot realtime status values
fout=os.path.join(fpathout,datestring+'_'+assist+'_fig05_status_realtime.png')
fig = plt.figure(figsize=(15,10))
gs=gridspec.GridSpec(2,2,figure=fig)
# left half: traffic-light status table
ax1=fig.add_subplot(gs[:,0])
ax1.axis('off')
tab=ax1.table(cellText=dfstrval.values.T,loc='center',cellColours=dfcol.values.T,cellLoc='center')
tab.scale(1,5)
tab.auto_set_font_size(False)
tab.set_fontsize('x-large')
# upper right: current zenith spectrum
ax2=fig.add_subplot(gs[0,1])
ax2.plot(anfch1.wnum,anfch1.mean_rad)
ax2.grid('on')
ax2.set_xlim(525,1825)
ax2.set_ylim(0,170)
ax2.set_xlabel('Wavenumber (cm$^{-1}$)')
ax2.set_ylabel('Radiance (mW/(cm$^{-1}$ m$^{2}$ sr)')
# ax2t=ax2.twinx()
# ax2t.plot(asum.wnumsum1,asum.SkyNENCh1)
# lower right: most recent camera image (when available)
ax3=fig.add_subplot(gs[1,1])
if len(listdircam)==0:
    ax3.text(0.5,0.5,'no current camera image',fontsize='x-large',ha='center')
    ax3.axis('off')
else:
    # camera file names end with ..._YYYYMMDD_HHMMSS.jpg
    camdates=[datestr2num(camdate[-19:-4],'%Y%m%d_%H%M%S') for camdate in listdircam]
    #try to show image closeest to statustime
    # idx=np.argmin(np.abs(camdates-mdates.date2num(statustime)))
    #try to show image closest to assist time
    idx=np.argmin(np.abs(camdates-mdates.date2num(sumtime.values)))
    # NOTE(review): the provided copy of the file ends here (truncated).
im=plt.imread(listdircam[idx]) # camdate=datestr2num(listdircam[-1][-19:-4],'%Y%m%d_%H%M%S') #no more than 5 min sway from desired time if np.abs(camdates[idx]-mdates.date2num(statustime))>5/(24*60): ax3.text(0.5,0.5,'no current camera image',fontsize='x-large',ha='center') ax3.axis('off') else: ax3.axis('off') ax3.imshow(im) logo=fig.add_axes([0.05,0.88,0.1,0.1],anchor='NW',zorder=1) im=plt.imread(os.path.join(funcdir,'noaa_web.png')) logo.imshow(im) logo.axis('off') plt.subplots_adjust(bottom=0.2) fig.suptitle(assist+ ' '+ site+' instrument health at '+\ num2datestr(mdates.date2num(sumtime.values),'%d %b %Y %H:%M:%S'),fontsize='xx-large',y=0.92) plt.text(0.9, 0.00, 'ASSIST housekeeping'+\ '\nContact: Bianca.Adler@noaa.gov, Creation date: '+datetime.datetime.now(datetime.UTC).strftime('%d %b %Y %H:%M UTC'), \ fontsize=14, transform=plt.gcf().transFigure,\ ha='right') print('save as '+fout) plt.savefig(fout,format='png',dpi=dpi,bbox_inches='tight') #%% threshold values (table only) fout=os.path.join(fpathout,datestring+'_'+assist+'_fig06_status_thresholds.png') fig = plt.figure(figsize=(15,10)) gs=gridspec.GridSpec(2,2,figure=fig) ax1=fig.add_subplot(gs[:,0]) # ax1.axis('off') dfplot=dfstr+'\n'+dfming.astype(str)+'-'+dfmaxg.astype(str) dfplotc=dfplot.copy() dfplotc.loc[:]='limegreen' ax1.axis('off') tab=ax1.table(cellText=dfplot.values.T,loc='center',cellColours=dfplotc.values.T,cellLoc='center') tab.scale(1,5) tab.auto_set_font_size(False) tab.set_fontsize('x-large') ax2=fig.add_subplot(gs[:,1]) dfplot=dfstr+'\n'+dfminy.astype(str)+' - '+dfming.astype(str)+' or '+dfmaxg.astype(str)+' - '+dfmaxy.astype(str) #if min are the same idx=dfminy==dfming dfplot[idx]=dfstr[idx]+'\n'+dfmaxg[idx].astype(str)+' - '+dfmaxy[idx].astype(str) #if max are the same idx=dfmaxy==dfmaxg dfplot[idx]=dfstr[idx]+'\n'+dfminy[idx].astype(str)+' - '+dfming[idx].astype(str) #if min and max are the same idx=(dfmaxy==dfmaxg) & (dfminy==dfming) dfplot[idx]=dfstr[idx]+'\n'+'n/a' # 
dfplot=dfstr+'\n'+dfminy.astype(str)+'-'+dfmaxy.astype(str) dfplotc=dfplot.copy() dfplotc.loc[:]='gold' ax2.axis('off') tab=ax2.table(cellText=dfplot.values.T,loc='center',cellColours=dfplotc.values.T,cellLoc='center') tab.scale(1,5) tab.auto_set_font_size(False) tab.set_fontsize('x-large') logo=fig.add_axes([0.15,0.88,0.1,0.1],anchor='NW',zorder=1) im=plt.imread(os.path.join(funcdir,'noaa_web.png')) logo.imshow(im) logo.axis('off') plt.subplots_adjust(bottom=0.2) fig.suptitle(assist+ ' '+ site+ ' threshold values',fontsize='xx-large',y=0.92) plt.text(0.9, 0.15, 'ASSIST housekeeping'+\ '\nContact: Bianca.Adler@noaa.gov, Creation date: '+datetime.datetime.now(datetime.UTC).strftime('%d %b %Y %H:%M UTC'), \ fontsize=14, transform=plt.gcf().transFigure,\ ha='right') print('save as '+fout) plt.savefig(fout,format='png',dpi=dpi,bbox_inches='tight') #%% if datestring == yesterday: drange_=[1,90] fignumber=['07','08'] elif datestring[-2:] == '01': drange_=[1,90] fignumber=['07','08'] else: drange_=[1] fignumber=['07'] #determine median temporal resolution sec=np.median(np.diff(assistsum.time.values))/np.timedelta64(1,'s') dt=mdates.date2num(assistsum.time) #drop duplicates _,idx=np.unique(dt,return_index=True) assistsum=assistsum.isel(time=idx) #resample, so that mssing values have timestamp assistsum=assistsum.resample(time=str(sec)+'s').nearest(tolerance=str(sec*2)+'s') dt=mdates.date2num(assistsum.time) #create array from variables and thresholds varsalla=np.asarray(varsall).T.flatten() varsnamea=dfstr.to_numpy().flatten() dfmaxga=dfmaxg.to_numpy().flatten() dfminga=dfming.to_numpy().flatten() dfmaxya=dfmaxy.to_numpy().flatten() dfminya=dfminy.to_numpy().flatten() for drange,count in zip(drange_,range(len(drange_))): # fout=os.path.join(fpathout,assistsum.time[-1].values.astype('datetime64[s]').item().strftime('%Y%m%d')+'_'+\ # assist+'_fig'+fignumber[count]+'_status_range_'+str(drange)) fout=os.path.join(fpathout,datestring+'_'+\ 
assist+'_fig'+fignumber[count]+'_status_range_'+str(drange)+'.png') xlim=[mdates.date2num(statustime-datetime.timedelta(days=drange)),\ mdates.date2num(statustime)] asum=assistsum.sel(time=slice(mdates.num2date(xlim[0]).replace(tzinfo=None),mdates.num2date(xlim[1]).replace(tzinfo=None))) dt=mdates.date2num(asum.time) dall=np.full((len(varsalla),len(asum.time)),0.) for var,count in zip(varsalla,range(len(varsalla))): idx=np.where((asum[var]>dfminya[count]) & (asum[var]dfminga[count]) & (asum[var]