Commit 32840ddc authored by Antoine Berchet's avatar Antoine Berchet
Browse files

LMDZ is making good progress; stuck because of satellites and what to extract...

LMDZ is making good progress; stuck because of satellites and what to extract from LMDZ: airm? hlay?
parent a5b4047e
......@@ -13,7 +13,7 @@ export PYCIF_PLATFORM=LSCE
mark="(dummy and article and inversion and not adjtltest and not uncertainties) or (fwd and ref_config)"
#mark="(fwd and ref_config)"
#mark="test_in_ci and dummy and adjtltest"
mark="test_in_ci and chimere and fwd"
mark="test_in_ci and chimere"
#mark="test_in_ci and flexpart"
###
......
......@@ -998,6 +998,8 @@ SUBROUTINE timeloop(nobs_glo, time_0, iday_step, fwd, nsplit, nsplit_dyn, &
tabobs(i,7) = tabobs(i,7) + ppmv
! Save pressure
! Column 9: pressure at middle of the level
! Column 10: pressure thickness of the level
l = nint( (tabobs(i,6) - iq)*100 )
tabobs(i,9) = tabobs(i,9) + (p(ii,jj,l)+p(ii,jj,l+1))/2.
tabobs(i,10) = tabobs(i,10) + p(ii,jj,l)-p(ii,jj,l+1)
......
......@@ -79,8 +79,8 @@ MODULE CONSTANTS
REAL, PARAMETER :: boltz = 1.38044e-16 ! erg/K
REAL, PARAMETER :: avogadro = 6.02214199e+23 ! mol-1
REAL, PARAMETER :: dry_mass= 28.966 !masse mol air sec
INTEGER, PARAMETER :: p_ref=101325. !pression a 1 atm
REAL, PARAMETER :: dry_mass= 28.966 ! molar mass of dry air
INTEGER, PARAMETER :: p_ref=101325. !pressure at 1 atm
END MODULE CONSTANTS
......
......@@ -23,7 +23,7 @@ L_BIBIO = -lbibio
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Option de compilation FORTRAN
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
OPTIMTRU90 = -cpp -O3 -DCPP_PARA -assume byterecl -r8 -DNC_DOUBLE -assume buffered_io -check all -traceback
OPTIMTRU90 = -cpp -O3 -DCPP_PARA -assume byterecl -r8 -DNC_DOUBLE -assume buffered_io #-check all -traceback
INCLUDE = -I$(LIBF)/grid -I$(LIBF)/bibio -I. -I$(LIBF)/dyn3d -I/usr/local/include -I$(LIBF)/phylmd -I${LOCAL_DIR}/libo -module $(LIBO) -I $(LIBO)
F90 = mpif90
COMPTRU90 = $(F90) $(OPTIMTRU90) $(INCLUDE) -c
......
......@@ -22,11 +22,11 @@ def default_fetch(
for dd in list_period_dates:
file = dd.strftime("{}/{}".format(ref_dir, ref_file))
if os.path.isfile(file):
list_dates[dd] = []
list_dates[dd] = [dd]
list_files[dd] = [file]
# Fetching
target_file = "{}/{}".format(target_dir, os.path.basename(file))
path.link(file, target_file)
return list_files, list_dates
import pandas as pd
from pycif.utils.path import init_dir
from .fetch import default_fetch
from logging import warning
......@@ -86,7 +87,40 @@ def init_components(plugin):
# continue
# else:
raise e
# Crop files and dates depending on global datei and datef
try:
list_files = {
ddi: [
ff for ff, dd
in zip(list_files[ddi], list_dates[ddi])
if dd[0] < plugin.datef and dd[1] > plugin.datei
]
for ddi in list_files}
list_dates = {
ddi: [dd for dd in list_dates[ddi]
if dd[0] < plugin.datef
and dd[1] > plugin.datei
]
for ddi in list_dates}
except Exception as e:
print(e)
pass
# Clean empty periods
# Make sure that periods are datetime objects
list_files = {
pd.DatetimeIndex([ddi]).to_pydatetime()[0]:
list_files[ddi] for ddi in list_files
# if list_dates[ddi] != []
}
list_dates = {
pd.DatetimeIndex([ddi]).to_pydatetime()[0]:
list_dates[ddi] for ddi in list_dates
# if list_dates[ddi] != []
}
# Attach info to the tracer
tracer.input_dates = list_dates
tracer.input_files = list_files
......
......@@ -5,7 +5,13 @@ from .utils import find_valid_file
def fetch(ref_dir, ref_file, input_dates, target_dir, tracer=None, **kwargs):
"""
"""
# Inputs:
#---------
# ref_dir: directory where the original files are found
......
......@@ -6,12 +6,11 @@ from netCDF4 import Dataset
def read(
self,
name,
tracdir,
tracfile,
varnames,
dates,
files,
interpol_flx=False,
comp_type=None,
tracer=None,
model=None,
**kwargs
):
......@@ -27,10 +26,10 @@ def read(
values of surrounding available files
"""
list_file_prescr = [dd.strftime(tracfile) for dd in dates]
trcr_prescr = []
for dd, file_prescr in zip(dates, list_file_prescr):
ds = xr.open_dataset("{}/{}".format(tracdir, file_prescr), decode_times=True)
trcr_dates = []
for dd, file_prescr in zip(dates, files):
ds = xr.open_dataset(file_prescr, decode_times=True)
data = ds[name].values
......@@ -38,7 +37,7 @@ def read(
data = np.append(data, data[..., np.newaxis, 0], axis=3)
# Searching for the right day
day = dd.day
day = dd[0].day
ds_days = ds["time_counter.day"]
if day in ds_days:
data = data[day - 1]
......@@ -46,10 +45,11 @@ def read(
data = data[0]
trcr_prescr.append(data)
trcr_dates.append(dd[0])
xmod = xr.DataArray(
np.array(trcr_prescr),
coords={"time": dates},
coords={"time": trcr_dates},
dims=("time", "lev", "lat", "lon")
)
......
from pycif.utils.classes.fluxes import Fluxes
import numpy as np
import os
import xarray as xr
def write(self, prescr_file, prescr, typefile, mode="a"):
"""Write prescribed species files for LMDZ
......
......@@ -10,24 +10,26 @@ def fetch(ref_dir, ref_file, date_interval, target_dir, tracer=None, **kwargs):
# Reshape input interval to include full years
datei, datef = date_interval
datei = datetime.datetime(year=datei.year, month=1, day=1)
datef = datetime.datetime(year=datef.year + 1, month=1, day=1)
if datetime.datetime(year=datef.year, month=1, day=1) < datef:
datef += pd.DatetimeIndex([datef]).days_in_month[0] \
* datetime.timedelta(days=1)
list_period_dates = \
pd.date_range(datei,datef, freq=tracer.file_freq)
pd.date_range(datei, datef, freq=tracer.file_freq)
list_dates = {}
list_files = {}
for dd in list_period_dates:
dout = datetime.datetime(dd.year + 1, 1, 1)
file = dd.strftime("{}/{}".format(ref_dir, ref_file))
if os.path.isfile(file):
file_hours = pd.date_range(
dd, dout, freq="1D")
list_dates[dd] = [[hh, hh + datetime.timedelta(hours=24)]
for hh in file_hours[:-1]]
list_files[dd] = (len(list_dates[dd]) * [file])
file_hours = pd.date_range(dd, dout, freq="1MS")
list_dates[dd] = [
[hh0, hh1] for hh0, hh1 in zip(file_hours[:-1], file_hours[1:])
]
# Fetching
target_file = "{}/{}".format(target_dir, os.path.basename(file))
path.link(file, target_file)
list_files[dd] = (len(list_dates[dd]) * [target_file])
return list_files, list_dates
......@@ -6,12 +6,12 @@ from logging import debug
def read(
self,
name,
tracdir,
tracfile,
varname,
varnames,
dates,
files,
interpol_flx=False,
tracer=None,
model=None,
**kwargs
):
"""Get fluxes from pre-computed fluxes and load them into a pycif
......@@ -27,16 +27,13 @@ def read(
"""
list_file_flx = [dd.strftime(tracfile) for dd in dates]
debug("Reading LMDZ sflx files for {} dates".format(len(list_file_flx)))
debug("Reading LMDZ sflx files for {} dates".format(len(dates)))
# Reading fluxes for periods within the simulation window
trcr_flx = []
for dd, file_flx in zip(dates, list_file_flx):
nc = xr.open_dataset(
"{}/{}".format(tracdir, file_flx), decode_times=False
)
trcr_dates = []
for dd, file_flx in zip(dates, files):
nc = xr.open_dataset(file_flx, decode_times=False)
nlon = self.domain.nlon
nlat = self.domain.nlat
......@@ -44,7 +41,7 @@ def read(
# Vector to map
# Deals with polar boxes by sub-dividing them zonally
# Also loops zonally for consistency with other call to gridded values
flx = nc[varname].values
flx = nc[varnames].values
flx0 = flx[:, 0]
flx1 = flx[:, -1]
......@@ -65,37 +62,38 @@ def read(
# Keeps only values for the corresponding month
# Assumes monthly resolution
if nc.dims["time"] == 12:
month = dd.month
month = dd[0].month
flx = flx[month - 1]
else:
flx = flx[0]
trcr_flx.append(flx)
# Interpolating fluxes temporally between file values
if interpol_flx:
weights = []
weights_inds = []
for flx_file, flx, dd in zip(list_file_flx, trcr_flx, dates):
inds = [
k for k, flxx in enumerate(trcr_flx) if np.all(flx == flxx)
]
w0 = dd - dates[inds[0]]
w1 = dates[min(inds[-1] + 1, len(dates) - 1)] - dd
dt = w1 + w0
w0 = w0.total_seconds() / float(dt.total_seconds())
w1 = w1.total_seconds() / float(dt.total_seconds())
weights.append((w0, w1))
weights_inds.append((inds[0], min(inds[-1] + 1, len(dates) - 1)))
trcr_dates.append(dd[0])
trcr_flx_interp = []
for k, ((w0, w1), (i0, i1)) in enumerate(zip(weights, weights_inds)):
trcr_flx_interp.append(trcr_flx[i0] * w1 + trcr_flx[i1] * w0)
trcr_flx = trcr_flx_interp
# # Interpolating fluxes temporally between file values
# if interpol_flx:
# weights = []
# weights_inds = []
# for flx_file, flx, dd in zip(files, trcr_flx, dates):
# inds = [
# k for k, flxx in enumerate(trcr_flx) if np.all(flx == flxx)
# ]
# w0 = dd - dates[inds[0]]
# w1 = dates[min(inds[-1] + 1, len(dates) - 1)] - dd
# dt = w1 + w0
# w0 = w0.total_seconds() / float(dt.total_seconds())
# w1 = w1.total_seconds() / float(dt.total_seconds())
# weights.append((w0, w1))
# weights_inds.append((inds[0], min(inds[-1] + 1, len(dates) - 1)))
#
# trcr_flx_interp = []
# for k, ((w0, w1), (i0, i1)) in enumerate(zip(weights, weights_inds)):
# trcr_flx_interp.append(trcr_flx[i0] * w1 + trcr_flx[i1] * w0)
# trcr_flx = trcr_flx_interp
xmod = xr.DataArray(
np.array(trcr_flx)[:, np.newaxis, ...],
coords={"time": dates},
coords={"time": trcr_dates},
dims=("time", "lev", "lat", "lon"),
)
......
......@@ -130,6 +130,10 @@ def ini_data(plugin, **kwargs):
source = plugin.fileexec
shutil.copy(source, target)
# Keep in memory from where the executable was copied
with open("{}/model/README".format(workdir), "w") as f:
f.write("DISPERSION was copied from: {}".format(source))
# LMDZ has a fixed integration time step
plugin.tstep = 0
......
......@@ -3,7 +3,8 @@ import datetime
def ini_mapper(model, transform_type, general_mapper={}, backup_comps={},
transforms_order=[], ref_transform="", **kwargs):
transforms_order=[], ref_transform="", transform_name="",
**kwargs):
input_intervals = {
ddi: np.append(
......@@ -36,16 +37,18 @@ def ini_mapper(model, transform_type, general_mapper={}, backup_comps={},
("chem_fields", ""): default_dict,
},
"outputs": {
("concs", s): {
(outcomp, s): {
"isobs": True, "force_loadout": True,
"input_dates": output_intervals,
"domain": model.domain,
"sampled": True
}
for s in model.chemistry.acspecies.attributes
for outcomp in ["concs", "pressure", "dpressure", "airm", "hlay"]
}
}
# Inputs dict
emis = {
("fluxes", s): dict_surface
for s in model.chemistry.emis_species.attributes
......@@ -54,10 +57,30 @@ def ini_mapper(model, transform_type, general_mapper={}, backup_comps={},
("inicond", s): dict_ini for s in model.chemistry.acspecies.attributes
}
prescrcond = {
("prescrconcs", s): dict_surface
("prescrconcs", s): {**dict_surface, **{"force_loadin": True}}
for s in model.chemistry.prescrconcs.attributes
}
# End concentrations from previous period for all active species
# are needed for later periods
endconcs_in = {
("endconcs", s):
{"input_dates": {ddi: np.array([[model.input_dates[ddi][0]]])
for ddi in list(model.input_dates.keys())[1:]},
"domain": model.domain, "force_dump": True}
for s in model.chemistry.acspecies.attributes
}
# End concentrations are saved for all periods
endconcs_out = {
("endconcs", s):
{"input_dates": {ddi: np.array([[model.input_dates[ddi][-1]]])
for ddi in model.input_dates},
"domain": model.domain, "force_loadout": True}
for s in model.chemistry.acspecies.attributes
}
# Photochemistry
list_var = \
[j[1][1] for j in model.chemistry.photo_rates.iterrows()] \
+ ['pmid', 'temp']
......@@ -76,10 +99,16 @@ def ini_mapper(model, transform_type, general_mapper={}, backup_comps={},
prodloss3d = {}
mapper["inputs"].update(
{**emis, **inicond, **prescrcond, **prodloss3d, **photj})
{**emis, **inicond, **prescrcond, **prodloss3d, **photj, **endconcs_in})
mapper["outputs"].update(endconcs_out)
# Accepts backup components instead of reference ones
if hasattr(model, "backup_comps"):
backup_comps.update(model.backup_comps)
# Force the transformation to be in its own precursors and successors
# to propagate end concentrations
mapper["precursors"] = {trid: [transform_name] for trid in endconcs_in}
mapper["successors"] = {trid: [transform_name] for trid in endconcs_in}
return mapper
......@@ -38,3 +38,8 @@ def ini_periods(self, **kwargs):
self.tstep_all = date_range(
datei, datef, period=self.periods, subperiod=freq
)
# Keep in memory whether a given period has a successor period
# The info is used by the adjoint to fetch or not the restart file
self.chain = {ddi: True for ddi in self.subsimu_dates[:-1]}
self.chain[self.subsimu_dates[-2]] = False
......@@ -23,7 +23,7 @@ def make_chemfields(self, data_in, input_type, ddi, ddf, runsubdir, mode):
if trid not in datastore:
continue
data = datastore[trid]["data"]
data = datastore[trid]["data"][ddi]
# If tangent-linear mode, include tl increments
if "incr" not in data or mode != "tl":
......@@ -40,9 +40,9 @@ def make_chemfields(self, data_in, input_type, ddi, ddf, runsubdir, mode):
self.prodloss3d.write(bin_file, ds)
else:
self.prescrconcs.write(bin_file, ds, 'bin')
dsphy = xr.Dataset({k[1]: datastore[k]['data']["spec"]
dsphy = xr.Dataset({k[1]: datastore[k]['data'][ddi]["spec"]
for k in datastore})
self.prescrconcs.write(nc_file, dsphy , 'nc')
self.prescrconcs.write(nc_file, dsphy, 'nc')
def make_Jfields(self, data_in, input_type, ddi, ddf, runsubdir, mode):
......
......@@ -26,16 +26,15 @@ def make_fluxes(self, datastore, ddi, ddf, runsubdir, mode):
info("LMDZ is generating flux inputs for {}".format(spec))
data = datastore[("fluxes", spec)]["data"]
data = datastore[("fluxes", spec)]["data"][ddi]
# If not determined by the control vector
if "spec" not in data:
data["spec"] = self.fluxes.read(
spec,
data["dirorig"],
data["fileorig"],
data["varname"],
self.input_dates[ddi],
datastore[("fluxes", spec)]["varname"],
datastore[("fluxes", spec)]["input_dates"][ddi],
datastore[("fluxes", spec)]["input_files"][ddi],
)
# Adds empty increments if not available
......
......@@ -12,37 +12,20 @@ def make_inicond(self, datastore, datei, datef, runsubdir, mode):
ddi = min(datei, datef)
ddf = max(datei, datef)
# Reference initial condition file for looping sub-simulations
if hasattr(self, "chain"):
source = self.chain.strftime(
"{}/../chain/restart_%Y%m%d%H%M.nc".format(runsubdir)
)
target = "{}/start.nc".format(runsubdir)
path.link(source, target)
if mode == "tl":
source = self.chain.strftime(
"{}/../chain/restart_tl_%Y%m%d%H%M.bin".format(runsubdir)
)
target = "{}/start_tl.bin".format(runsubdir)
path.link(source, target)
return
# Generating reference initial conditions if first sub-simulation
for spec in self.chemistry.acspecies.attributes:
restartID = getattr(self.chemistry.acspecies, spec).restart_id
if ("inicond", spec) not in datastore:
continue
ini = datastore[("inicond", spec)]["data"]
ini = datastore[("inicond", spec)]["data"][ddi]
# Copy from existing file if not loaded in datastore
target = "{}/start.nc".format(runsubdir)
target_tl = "{}/start_tl.nc".format(runsubdir)
if "spec" not in ini:
ref_dir = ini["dirorig"]
ref_file = ini["fileorig"]
ref_dir = datastore[("inicond", spec)]["dirorig"]
ref_file = datastore[("inicond", spec)]["fileorig"]
# Copy if does not already exists
source = ddi.strftime("{}/{}".format(ref_dir, ref_file))
......
import filecmp
import os
import shutil
import pathlib
import numpy as np
from netCDF4 import Dataset
import xarray as xr
from logging import warning
from pycif.utils import path
def make_endconcs(self, datastore, runsubdir, mode, datei, datef, onlyinit):
    """Link restart (end-concentration) files between chained LMDZ
    sub-simulations.

    Two cases are handled:
      * adjoint with ``onlyinit``, or forward/tangent-linear without
        ``onlyinit``: link each species' reference restart file (stored under
        ``self.adj_refdir``) to ``start.nc`` in the run sub-directory;
      * adjoint without ``onlyinit``: link the ``restart_*.nc`` file produced
        by the previous sub-simulation (date given by ``self.chain``) to
        ``start.nc``.

    Parameters
    ----------
    datastore : dict
        Mapper datastore; entries are expected to carry
        ``["data"][datei]["fileorig"]`` with the restart file name.
    runsubdir : str
        Directory of the current sub-simulation.
    mode : str
        Running mode: "fwd", "tl" or "adj".
    datei, datef : datetime.datetime
        Start and end dates of the current sub-simulation.
    onlyinit : bool
        Whether only initial conditions are being prepared.
    """
    # If adjoint and only init, fetch initial concentrations from the forward
    # run; if fwd or tl and not only init, fetch initial concentrations from
    # the end of the previous simulation
    if (onlyinit and mode == "adj") or (not onlyinit and mode != "adj"):
        # Fixed name for restart files
        fileout = "{}/start.nc".format(runsubdir)
        for trid in datastore:
            fileorig = "{}/{}".format(
                self.adj_refdir,
                datastore[trid]["data"][datei]["fileorig"])
            path.link(fileorig, fileout)
        # TODO: adapt to include start_tl.nc

    if not onlyinit and mode == "adj":
        # self.chain holds the date of the previous sub-simulation whose
        # restart file must be chained in
        # NOTE(review): assumes self.chain is a datetime here — the dict set
        # up in ini_periods suggests this may need revisiting; confirm
        source = self.chain.strftime(
            "{}/../chain/restart_%Y%m%d%H%M.nc".format(runsubdir)
        )
        target = "{}/start.nc".format(runsubdir)
        path.link(source, target)

        # NOTE(review): unreachable under mode == "adj"; kept from the
        # original (likely copied from make_inicond) — confirm intent
        if mode == "tl":
            source = self.chain.strftime(
                "{}/../chain/restart_tl_%Y%m%d%H%M.bin".format(runsubdir)
            )
            target = "{}/start_tl.bin".format(runsubdir)
            path.link(source, target)
\ No newline at end of file
......@@ -3,6 +3,7 @@ from .inputs.chemfields import make_Jfields
from .inputs.fluxes import make_fluxes
from .inputs.inicond import make_inicond
from .inputs.obs import make_obs
from .inputs.make_endconcs import make_endconcs
def native2inputs(
......@@ -63,6 +64,10 @@ def native2inputs(
elif input_type == "inicond":
make_inicond(self, datastore, datei, datef, runsubdir, mode)
# Initial conditions from previous simulations
elif input_type == "endconcs":
make_endconcs(self, datastore, runsubdir, mode, ddi, ddf, onlyinit)
# Deals with photolysis rates
elif input_type == "kinetic":
make_Jfields(self, datastore, input_type, ddi, ddf, runsubdir, mode)
......