Commit 4409903f authored by Espen Sollum

commit before refactoring and looking at background

parent 071c32e5
@@ -43,11 +43,13 @@ def build_hcorrelations(zlat, zlon, lsm,
# Define domain dimensions
nlon, nlat = zlat.shape
+    import pdb; pdb.set_trace()
# Try reading existing file
try:
evalues, evectors = \
read_hcorr(nlon, nlat, sigma_sea, sigma_land, dir_dump)
# Else build correlations
except IOError:
@@ -107,7 +109,7 @@ def dump_hcorr(nlon, nlat, sigma_sea, sigma_land,
ncell = evalues.size
-    file_dump = '{}/horcor_{}x{}_cs{}_cl{}_lmdz.bin'.format(
+    file_dump = '{}/horcor_{}x{}_cs{}_cl{}.bin'.format(
dir_dump, nlon, nlat, sigma_sea, sigma_land)
if os.path.isfile(file_dump):
@@ -115,8 +117,10 @@ def dump_hcorr(nlon, nlat, sigma_sea, sigma_land,
"I don't want to overwrite it".format(file_dump))
datasave = np.concatenate((evalues[np.newaxis, :], evectors), axis=0)
-    datasave.tofile(file_dump)
+    if isinstance(datasave, np.ma.MaskedArray):
+        datasave.filled().tofile(file_dump)
+    else:
+        datasave.tofile(file_dump)
def read_hcorr(nlon, nlat, sigma_sea, sigma_land, dir_dump):
"""Reads horizontal correlations from existing text file
@@ -128,7 +132,7 @@ def read_hcorr(nlon, nlat, sigma_sea, sigma_land, dir_dump):
"""
-    file_dump = '{}/horcor_{}x{}_cs{}_cl{}_lmdz.bin'.format(
+    file_dump = '{}/horcor_{}x{}_cs{}_cl{}.bin'.format(
dir_dump, nlon, nlat, sigma_sea, sigma_land)
if not os.path.isfile(file_dump):
......
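For reference, dump_hcorr stores the eigenvalues as the first row of a raw float64 array, followed by the (ncell x ncell) eigenvector matrix. A minimal sketch of the matching read, assuming ncell = nlon * nlat and the layout produced by the concatenate/tofile calls above (read_hcorr_sketch is a hypothetical name, not the project's function):

import numpy as np

def read_hcorr_sketch(nlon, nlat, file_dump):
    # tofile() writes raw float64 values with no header, so the shape
    # must be rebuilt from the known number of grid cells
    ncell = nlon * nlat
    data = np.fromfile(file_dump).reshape((ncell + 1, ncell))
    evalues = data[0]    # first row: eigenvalues
    evectors = data[1:]  # remaining rows: eigenvectors, one per cell
    return evalues, evectors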
@@ -56,18 +56,29 @@ def dump(self, cntrl_file, to_netcdf=False, dir_netcdf=None, **kwargs):
(tracer.ndates, -1))
x = scale2map(x, tracer, tracer.dates, self.domain)
+            import pdb; pdb.set_trace()
# If 'offsets', add prior fluxes
if getattr(tracer, 'offsets', False):
x[:,0,:,:] += self.flxall/np.float(self.model.numscale)
xb = np.reshape(
self.xb[tracer.xpointer:tracer.xpointer + tracer.dim],
(tracer.ndates, -1))
xb = scale2map(xb, tracer, tracer.dates, self.domain)
+            # ESO: unaggregated fluxes, for testing
+            arr = self.flxall[:, np.newaxis, :, :]/np.float(self.model.numscale)
+            xb_grid = xr.DataArray(arr,
+                                   coords={'time': tracer.dates[:-1], 'lev': tracer.levels},
+                                   dims=('time', 'lev', 'lat', 'lon'))
std = np.reshape(
self.std[tracer.xpointer:tracer.xpointer + tracer.dim],
(tracer.ndates, -1))
std = scale2map(std, tracer, tracer.dates, self.domain)
-            ds = xr.Dataset({'x': x, 'xb': xb, 'std': std, 'lon' : tracer.domain.lon, 'lat' :
-                             tracer.domain.lat})
+            ds = xr.Dataset({'x': x, 'xb': xb, 'xb_grid': xb_grid, 'std': std,
+                             'lon': tracer.domain.lon, 'lat': tracer.domain.lat})
ds.to_netcdf('{}/controlvect_{}_{}.nc'.format(dir_comp, comp, trcr))
......
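The dataset written above can be inspected directly with xarray; a usage sketch, with illustrative values filling the format string of the output path:

import xarray as xr

# hypothetical names: dir_comp='.', comp='fluxes', trcr='CH4'
ds = xr.open_dataset('./controlvect_fluxes_CH4.nc')
print(ds['xb_grid'])       # unaggregated prior fluxes (time, lev, lat, lon)
print(ds['x'] - ds['xb'])  # increment of the control variables over the prior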
@@ -32,6 +32,7 @@ def init_bprod(cntrlv, options={}, **kwargs):
tracer = getattr(component.parameters, trcr)
# Deals with horizontal correlations if any
+        # ESO: check what to do with regions
if hasattr(tracer, 'hcorrelations') and tracer.hresol == 'hpixels':
corr = tracer.hcorrelations
dump_hcorr = getattr(corr, 'dump_hcorr', False)
@@ -112,7 +113,7 @@ def init_bprod(cntrlv, options={}, **kwargs):
            # as the last date indicates the end of the period
sqrt_evalues, evectors = \
build_tcorrelations(
-                        period, dates, sigma_t,
+                        period, dates[:-1], sigma_t,
evalmin=evalmin,
dump=dump_tcorr, dir_dump=dircorrel,
**kwargs)
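Passing dates[:-1] follows from the comment above: N date boundaries delimit N - 1 periods, so the closing boundary defines no period of its own. A one-line illustration:

dates = ['2012-01-01', '2012-02-01', '2012-03-01']  # 3 boundaries
periods = zip(dates[:-1], dates[1:])                # 2 periods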
@@ -129,7 +130,7 @@ def init_bprod(cntrlv, options={}, **kwargs):
else:
tracer.chi_tresoldim = tracer.ndates
# Update the dimension of the full control vector
tracer.chi_pointer = cntrlv.chi_dim
tracer.chi_dim = tracer.chi_hresoldim * tracer.chi_tresoldim
......
@@ -3,6 +3,7 @@ from scipy import ndimage
from pycif.utils import dates
from .utils.dimensions import hresol2dim, vresol2dim
from .utils.scalemaps import map2scale, vmap2vaggreg
+import xarray as xr
def init_xb(cntrlv, **kwargs):
@@ -56,20 +57,48 @@ def init_xb(cntrlv, **kwargs):
# Scale xb if prescribed in Yaml
xb_scale = getattr(tracer, 'xb_scale', 1.)
+        import pdb; pdb.set_trace()
# Filling with defined value if prescribed
xb_value = getattr(tracer, 'xb_value', 0.)
# Filling Xb and uncertainties
+        if getattr(tracer, 'type') == 'flexpart':
+            # ESO: Reading fluxes and nested fluxes for flexpart;
+            # for now, only aggregation to regions is supported
+            flxall = tracer.read(
+                trcr, tracer.dir, tracer.file, tracer.dates,
+                comp_type=comp,
+                **kwargs) * xb_scale \
+                + xb_value
+            xb = np.ones(tracer.dim) * xb_scale + xb_value
+            # import pdb; pdb.set_trace()
+            flx_regions = map2scale(
+                flxall[:, np.newaxis, :, :], tracer, tracer.domain).flatten()
+            xb *= flx_regions
+            std = tracer.err * np.abs(xb)
+            # Set a lower limit on the flux error
+            tracer.flxerr_ll = getattr(tracer, 'flxerr_ll', 1.e-8)
+            std[std <= tracer.flxerr_ll] = tracer.flxerr_ll
+            # Keep the un-aggregated fluxes for later use in obsoper
+            cntrlv.flxall = flxall
# Most types are scalar factors
# import pdb ; pdb.set_trace()
-        if getattr(tracer, 'type', 'scalar') == 'scalar' \
-                or getattr(tracer, 'hresol', '') != 'hpixels' \
-                or getattr(tracer, 'vresol', '') != 'vpixels':
+        elif getattr(tracer, 'type', 'scalar') == 'scalar' \
+                or getattr(tracer, 'hresol', '') != 'hpixels' \
+                or getattr(tracer, 'vresol', '') != 'vpixels':
tracer.type = 'scalar'
xb = np.ones(tracer.dim) * xb_scale + xb_value
std = tracer.err * xb
        # Physical variables store uncertainties in the physical space
# Valid only for control variables at the pixel resolution
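A condensed, self-contained sketch of what the new 'flexpart' branch does: aggregate the gridded prior fluxes to region totals (here with plain numpy standing in for map2scale), scale the prior vector by them, and floor the error. The function name and arguments are illustrative:

import numpy as np

def flexpart_prior_sketch(flxall, regions, err, flxerr_ll=1.e-8):
    # flxall: (ndates, nlat, nlon) prior fluxes; regions: (nlat, nlon) labels
    regs = np.unique(regions)
    # sum the flux inside each region for every date (cf. map2scale)
    flx_regions = np.stack(
        [flxall[:, regions == reg].sum(axis=1) for reg in regs],
        axis=1).flatten()
    xb = np.ones(flxall.shape[0] * regs.size) * flx_regions
    std = err * np.abs(xb)
    std[std <= flxerr_ll] = flxerr_ll  # lower limit on the flux error
    return xb, std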
@@ -80,8 +109,7 @@ def init_xb(cntrlv, **kwargs):
comp_type=comp,
**kwargs).data * xb_scale \
+ xb_value
-            pdb.set_trace()
# Vertical aggregation depending on vertical stacks
vaggreg = vmap2vaggreg(flxall, tracer, tracer.domain)
xstack = map2scale(vaggreg, tracer, cntrlv.domain)
@@ -110,7 +138,5 @@ def init_xb(cntrlv, **kwargs):
# Appending local xb to the control vector
cntrlv.xb = np.append(cntrlv.xb, xb)
cntrlv.std = np.append(cntrlv.std, std)
-        # ESO
-        print "controlvect:shape(cntrlv.xb)", np.shape(cntrlv.xb)
return cntrlv
@@ -61,6 +61,12 @@ def hresol2dim(tracer, dom, **kwargs):
tr_tmp = tr_tmp + np.abs(tr_tmp.min()) + 1
tracer.regions = tr_tmp.astype(int)
+        # Get surface area for each region box
+        tracer.region_areas = np.zeros(tracer.nregions)
+        for i, reg in enumerate(np.unique(tracer.regions)):
+            tracer.region_areas[i] = \
+                tracer.domain.areas[np.where(tracer.regions == reg)].sum()
# Check that regions have the correct dimensions
if tracer.regions.shape != (dom.nlat, dom.nlon):
raise Exception("Regions were not correctly defined in {}"
......
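A small worked example of the region_areas loop above, on a hypothetical 2x2 domain:

import numpy as np

regions = np.array([[1, 1], [1, 2]])        # two regions on a 2x2 grid
areas = np.array([[1.0, 2.0], [3.0, 4.0]])  # per-cell surface areas
region_areas = np.zeros(np.unique(regions).size)
for i, reg in enumerate(np.unique(regions)):
    region_areas[i] = areas[regions == reg].sum()
# region_areas -> array([ 6., 4.])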
@@ -44,6 +44,7 @@ def scale2map(x, tracer, dates, dom):
iband += 1
elif tracer.hresol == 'regions':
ndates = len(dates) - 1
xmap = np.zeros((ndates, tracer.vresoldim, dom.nlat, dom.nlon))
for regID, reg in enumerate(np.unique(tracer.regions)):
xmap[..., tracer.regions == reg] = \
@@ -64,16 +65,18 @@ def scale2map(x, tracer, dates, dom):
return xmap
-def map2scale(xmap, tracer, dom):
+def map2scale(xmap, tracer, dom, scale_area=False):
"""Translates a map to a control vector slice
Args:
xmap: the map
tracer: tracer Class with corresponding attributes
dom: the model domain
+        scale_area: for 'regions', weight grid cells by their area when aggregating
Returns:
the control vector vertical slice
"""
# Checking that xmap has the correct dimension
@@ -113,6 +116,7 @@ def map2scale(xmap, tracer, dom):
iband += 1
elif tracer.hresol == 'regions':
+        # TODO: add option to scale with area size
x = np.zeros((ndates, tracer.vresoldim, tracer.nregions))
for regID, reg in enumerate(np.unique(tracer.regions)):
x[..., regID] = np.sum(xmap[..., tracer.regions == reg], axis=2)
......
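The scale_area flag added to the signature is not wired up yet (hence the TODO). One possible implementation of the regions branch, replacing the plain-sum loop above; it assumes dom.areas holds per-cell areas (as tracer.domain.areas does in hresol2dim) and tracer.region_areas is filled as shown there. A sketch, not the project's code:

for regID, reg in enumerate(np.unique(tracer.regions)):
    mask = tracer.regions == reg
    if scale_area:
        # area-weighted mean over the region instead of a plain sum
        x[..., regID] = np.sum(xmap[..., mask] * dom.areas[mask],
                               axis=2) / tracer.region_areas[regID]
    else:
        x[..., regID] = np.sum(xmap[..., mask], axis=2)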
@@ -63,10 +63,9 @@ def parse_tracers(self,
raise e
# Otherwise, create the monitor from observations
-            if hasattr(self, 'workdir'):
-                workdir = self.workdir
-                fic_monitor = workdir + '/obs/monit_standard.nc'
+        if hasattr(self, 'workdir'):
+            workdir = self.workdir
+            fic_monitor = workdir + '/obs/monit_standard.nc'
# If the measurement definition is empty in the Yaml,
# return an empty datastore
......
@@ -46,13 +46,13 @@ def execute(self, **kwargs):
for trcr in controlvect.components.fluxes.parameters.attributes:
tracer = getattr(controlvect.components.fluxes.parameters, trcr)
-        dates = tracer.dates[:-1]
+        dates = tracer.dates[:]
# Fetching the correct slice in the control vector
tmp = controlvect.x[tracer.xpointer:tracer.xpointer + tracer.dim]
# Projecting to a map
-        flx_map = controlvect.utils.scale2map(tmp, tracer, dates,
+        flx_map = controlvect.utils.scalemaps.scale2map(tmp, tracer, dates,
controlvect.domain)
# Changing time format for saving in netcdf
@@ -62,13 +62,13 @@ def execute(self, **kwargs):
flx_maps[trcr] = flx_map
tmp = controlvect.xb[tracer.xpointer:tracer.xpointer + tracer.dim]
-        flx_map = controlvect.utils.scale2map(tmp, tracer, dates,
+        flx_map = controlvect.utils.scalemaps.scale2map(tmp, tracer, dates,
controlvect.domain)
flx_maps_bg[trcr] = flx_map
# Do interactive stuff
-        import code
-        code.interact(local=dict(globals(), **locals()))
+        # import code
+        # code.interact(local=dict(globals(), **locals()))
# Save output as NetCDF
flx_maps[trcr].to_netcdf(
......
@@ -120,6 +120,6 @@ def init_y0(obsvect, measurements, **kwargs):
nc_attributes=nc_attributes,
mode='w')
-    import pdb; pdb.set_trace()
+    # import pdb; pdb.set_trace()
return obsvect
@@ -78,13 +78,15 @@ def simul(self, chi, grad=True, run_id=-1, **kwargs):
obsvect.datastore['obs_incr'] = \
departures / obsvect.datastore['obserror'] ** 2
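        # Note: assuming 'obserror' holds observation-error standard
        # deviations (diagonal R), departures / obserror**2 is
        # R^-1 (Hx - y); the adjoint ('footprint') run maps these
        # weights back to control space, giving the observational
        # gradient H^T R^-1 (Hx - y).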
-        controlvect = obsoper.obsoper(obsvect, 'footprint',
-                                      datei=datei, datef=datef,
-                                      workdir=workdir, run_id=run_id,
-                                      reload_results=reload_results,
-                                      **kwargs)
+        # ESO rewrite this...
+        # controlvect = obsoper.obsoper(obsvect, 'footprint',
+        #                               datei=datei, datef=datef,
+        #                               workdir=workdir, run_id=run_id,
+        #                               reload_results=reload_results,
+        #                               **kwargs)
-        # import pdb; pdb.set_trace()
+        # import pdb ; pdb.set_trace()
controlvect.dx = obsvect.dx
# Observational part of the gradient
......
@@ -80,3 +80,25 @@ def date_range(datei, datef, period='1000000H', subperiod=''):
list_dates.remove(d0)
return np.array(list_dates)
+def j2d(fp_day):
+    """Converts a FLEXPART julian day to a datetime object
+
+    Args:
+        fp_day: julian day in FLEXPART format (float); the fractional
+            part of the day is discarded
+
+    Returns:
+        datetime object
+
+    Note:
+        1721425 is the offset between the julian day count and the
+        proleptic Gregorian ordinal used by datetime.date.fromordinal
+        (julian day 1721426 corresponds to 0001-01-01)
+    """
+    return datetime.datetime.combine(
+        datetime.date.fromordinal(int(fp_day)), datetime.time()) - \
+        datetime.timedelta(days=1721425)
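A usage sketch (julian day 2458000 corresponds to 2017-09-03 12:00 UTC; the fractional part of the day is truncated by int()):

>>> j2d(2458000.0)
datetime.datetime(2017, 9, 3, 0, 0)
>>> j2d(2458000.9)  # fractional day discarded
datetime.datetime(2017, 9, 3, 0, 0)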