Commit f9c8180d authored by Antoine Berchet

Merge branch 'LSCE' of gitlab.in2p3.fr:satinv/cif into LSCE

parents 644db6d6 36ef7c3c
Pipeline #146 failed with stages in 0 seconds
@@ -176,7 +176,12 @@ article:
# - apt-get update
- pip freeze
script:
- tox -e py38 -e coverage -- -m "(dummy and article and inversion and not adjtltest and not uncertainties) or (fwd and ref_config)"
- |
  if [ "${CI_COMMIT_BRANCH}" == "LSCE" ]; then
    tox -e py38 -e coverage -- -m "(dummy and article and inversion and not adjtltest and not uncertainties) or (fwd and ref_config) or (allsimulations)";
  else
    tox -e py38 -e coverage -- -m "(dummy and article and inversion and not adjtltest and not uncertainties and bands) or (fwd and ref_config)";
  fi;
after_script:
- mkdir -p coverage
- xmlstarlet sel -t -v "//coverage/@line-rate" reports/coverage.xml > coverage/.current_coverage
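For context, the -m expressions in the script above select tests through pytest markers. A minimal sketch of how such markers are attached to a test (the test name is hypothetical; the marker names are the ones the expressions above select on):

.. code-block:: python

    import pytest

    # Hypothetical test: it matches the LSCE expression
    # "(dummy and article and inversion and not adjtltest and not uncertainties)"
    # and, thanks to the "bands" marker, the non-LSCE expression too.
    @pytest.mark.dummy
    @pytest.mark.article
    @pytest.mark.inversion
    @pytest.mark.bands
    def test_dummy_inversion():
        assert True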
@@ -195,8 +200,6 @@ article:
- coverage_raw
- examples_artifact
- figures_artifact
only:
- LSCE
article_uncertainties:
stage: test
@@ -207,7 +210,12 @@ article_uncertainties:
# - apt-get update
- pip freeze
script:
- tox -e py38 -e coverage -- -m "(dummy and article and inversion and not adjtltest and uncertainties) or (fwd and ref_config)"
- |
  if [ "${CI_COMMIT_BRANCH}" == "LSCE" ]; then
    tox -e py38 -e coverage -- -m "(dummy and article and inversion and not adjtltest and uncertainties) or (fwd and ref_config) or (allsimulations)";
  else
    tox -e py38 -e coverage -- -m "(dummy and article and inversion and not adjtltest and uncertainties and bands) or (fwd and ref_config)";
  fi;
after_script:
- mkdir -p coverage
- xmlstarlet sel -t -v "//coverage/@line-rate" reports/coverage.xml > coverage/.current_coverage
@@ -226,8 +234,6 @@ article_uncertainties:
- coverage_raw
- examples_artifact
- figures_artifact
only:
- LSCE
# Run the tests for the dummy model
tests_dummy:
@@ -297,6 +303,7 @@ tests_chimere:
# Run the tests for flexpart (includes downloading data)
tests_flexpart:
stage: test
retry: 2
image:
name: pycif/pycif-ubuntu:0.1
entrypoint: [""]
......
#QSUB -s /bin/tcsh
#PBS -q longp
#PBS -l nodes=1:ppn=8
#PBS -q xlongp
#PBS -l nodes=1:ppn=10
python -m pycif /home/users/aberchet/CIF/config_files/tuto_chimere/config_chimere_argonaut_n2o_sat_inv_corr.yml
#python -m pycif /homen/users/aberchet/CIF/config_files/tuto_chimere/config_chimere_EUROCOM_satOMI.yml
python -m pycif /home/users/aberchet/CIF/config_files/RECAPP/config_chimere_fwd_EUROCOM_CO2_AB_6mois_TNO_2008_1.yml
#QSUB -s /bin/tcsh
#PBS -q xlongp
#PBS -l nodes=1:ppn=10
#PBS -N RECAPP_2008_1
python -m pycif /home/users/aberchet/CIF/config_files/RECAPP/config_chimere_fwd_EUROCOM_CO2_AB_6mois_TNO_2008_1.yml
@@ -16,10 +16,10 @@ export PYCIF_DATATEST=/home/chimereges/PYCIF_TEST_DATA/
export PYCIF_PLATFORM=LSCE
###
# select a subset of tests to run by using the tags ("mark")
#mark="(dummy and article and inversion and not adjtltest and not uncertainties) or (fwd and ref_config)"
mark="(dummy and article and inversion and not adjtltest and not uncertainties) or (fwd and ref_config)"
#mark="(dummy and article and inversion and not adjtltest and uncertainties) or (fwd and ref_config)"
#mark="(fwd and ref_config)"
mark="test_in_ci and dummy"
#mark="test_in_ci and dummy"
#mark="test_in_ci and chimere"
#mark="chimere and argfrsd and fwd"
#mark="tm5 and test_in_ci and fwd"
......
@@ -289,9 +289,10 @@ def doc_from_arginputs(input_arguments, write_headers=True):
towrite.append(" **accepted values**: {}".format(accepted))
elif type(accepted) is dict:
towrite.append(" **accepted values**:")
towrite.append("")
for key in accepted:
towrite.append(" {}: {}".format(key, accepted[key]))
towrite.append(" - {}: {}".format(key, accepted[key]))
else:
towrite.append(" **accepted type**: {}".format(accepted))
@@ -318,7 +319,6 @@ def process_pycif_keywords(app, what, obj_name, obj, options, lines):
- default_values
- mandatory_values
"""
ref_lines = copy.deepcopy(lines)
# Adding bash highlight by default
@@ -403,9 +403,30 @@ def process_pycif_keywords(app, what, obj_name, obj, options, lines):
preftree = key_req.get("preftree", "")
empty = key_req.get("empty", False)
name = key_req.get("name", None)
version = key_req.get("version", "")
plg_type = Plugin.plugin_types[key_req.get("type", req)][1]
plg_path = Plugin.plugin_types[key_req.get("type", req)][0][1:]
version = key_req.get("version", None)
req_type = key_req.get("type", req)
req_subtype = key_req.get("subtype", "")
# Load required plugin to deal with types and sub-types
plg_req = Plugin.from_dict({
"plugin": {
"name": name,
"version": version,
"type": req_type,
"subtype": req_subtype
}
})
plg_req._load_plugin_type(req)
plg_type = \
Plugin.plugin_types[plg_req.plugin.type][1]
plg_path = \
Plugin.plugin_types[plg_req.plugin.type][0][1:]
plg_subtype = \
Plugin.plugin_subtypes[
plg_req.plugin.type][
plg_req.plugin.subtype][1:]
# String to dump
newplg = key_req.get("newplg", False)
towrite.extend((
" * - {}\n"
@@ -534,7 +555,7 @@ def build_rst_from_plugins(app):
init_dir(plg_dir)
# Initialize index
towrite = [
towrite_overall_index = [
"##################",
"Plugins in pyCIF",
"##################",
@@ -544,12 +565,7 @@ def build_rst_from_plugins(app):
"",
" ../plugin_description",
" ../dependencies"
] + [
" {}/index".format(Plugin.plugin_types[plg_type][0][1:])
for plg_type in Plugin.plugin_types
]
with open("{}/index.rst".format(plg_dir), "w") as f:
f.write("\n".join(towrite))
# Loop on all plugin types
for plg_type in Plugin.plugin_types:
@@ -559,7 +575,7 @@ def build_rst_from_plugins(app):
Plugin.plugin_types[plg_type][0])
class_module = pkgutil.importlib.import_module(class_path)
local_class = getattr(class_module, class_name)
# Create directory
plg_type_dir = "{}/{}".format(
plg_dir, Plugin.plugin_types[plg_type][0][1:])
@@ -567,29 +583,82 @@ def build_rst_from_plugins(app):
# Loop over modules of this class
package_path = "pycif.plugins{}".format(Plugin.plugin_types[plg_type][0])
if pkgutil.importlib.util.find_spec(package_path) is None:
continue
# Update overall index
towrite_overall_index.append(
" {}/index".format(Plugin.plugin_types[plg_type][0][1:]))
# Loop over sub-types
import_package = pkgutil.importlib.import_module(package_path)
package_index = []
for mod in pkgutil.walk_packages(import_package.__path__,
prefix=import_package.__name__ + "."):
if not mod.ispkg:
continue
for subtype in Plugin.plugin_subtypes[plg_type]:
local_subpackage = "{}{}".format(
package_path,
Plugin.plugin_subtypes[plg_type][subtype])
import_subpackage = pkgutil.importlib.import_module(local_subpackage)
# Create directory
plg_subtype_dir = "{}/{}".format(
plg_type_dir,
Plugin.plugin_subtypes[plg_type][subtype][1:])
init_dir(plg_subtype_dir)
loc_mod = pkgutil.importlib.import_module(mod.name)
# Register modules only when a name is given
if not hasattr(loc_mod, "_name"):
continue
# Loop over modules in the sub-type
package_subindex = []
package_subtitles = []
for mod in pkgutil.walk_packages(import_subpackage.__path__,
prefix=import_subpackage.__name__ + "."):
if not mod.ispkg:
continue
loc_mod = pkgutil.importlib.import_module(mod.name)
# Register modules only when a name is given
if not hasattr(loc_mod, "_name"):
continue
# Create corresponding rst file
file_name = "{}/{}.rst".format(
plg_subtype_dir, loc_mod.__name__.split(".")[-1])
# Create corresponding rst file
file_name = "{}/{}.rst".format(
plg_type_dir, loc_mod.__name__.split(".")[-1])
title = ":bash:`{}` / :bash:`{}`".format(
loc_mod._name, getattr(loc_mod, "_version", "std"))
title = ":bash:`{}` / :bash:`{}`".format(
loc_mod._name, getattr(loc_mod, "_version", "std"))
if hasattr(loc_mod, "_fullname"):
title = "{} ({})".format(loc_mod._fullname, title)
towrite = [
".. role:: bash(code)",
" :language: bash",
"",
"",
len(title) * "#",
title,
len(title) * "#",
"",
".. automodule:: {}".format(loc_mod.__name__)
]
with open(file_name, "w") as f:
f.write("\n".join(towrite))
# Append name for plugin type index
package_subindex.append(loc_mod.__name__.split(".")[-1])
package_subtitles.append(getattr(loc_mod, "_fullname", loc_mod._name))
# Sort names
package_subindex = [
x for _,x in sorted(zip(package_subtitles, package_subindex))]
if hasattr(loc_mod, "_fullname"):
title = "{} ({})".format(loc_mod._fullname, title)
# Write the plugin type index
if subtype == "":
continue
title = list(subtype)
title[0] = title[0].upper()
title = "".join(title)
towrite = [
".. role:: bash(code)",
" :language: bash",
@@ -598,54 +667,86 @@ def build_rst_from_plugins(app):
len(title) * "#",
title,
len(title) * "#",
""] + ([".. contents:: Contents", " :local:", ""]
if import_subpackage.__doc__ is not None else []) + [
"Available {}".format(title),
(len(title) + 11) * "=",
"",
".. automodule:: {}".format(loc_mod.__name__)
]
with open(file_name, "w") as f:
"The following :bash:`{}` of sub-type {} "
"are implemented in pyCIF so far:".format(
Plugin.plugin_types[plg_type][0][1:],
subtype),
"",
".. toctree::",
"",
] + [
" {}".format(plg) for plg in package_subindex
] + (
import_subpackage.__doc__.split('\n')
if import_subpackage.__doc__ is not None
else []
)
with open("{}/index.rst".format(plg_subtype_dir), "w") as f:
f.write("\n".join(towrite))
# Append name for plugin type index
package_index.append(loc_mod.__name__.split(".")[-1])
# Sort names
package_index.sort()
# Write the plugin type index
title = list(Plugin.plugin_types[plg_type][0][1:])
title[0] = title[0].upper()
title = "".join(title) + " (:bash:`{}`)".format(plg_type)
towrite = [
".. role:: bash(code)",
" :language: bash",
"",
"",
len(title) * "#",
title,
len(title) * "#",
""] + ([
".. contents:: Contents",
" :local:",
""
] if import_package.__doc__ is not None else []) + [
"Available {}".format(title),
(len(title) + 11) * "=",
"",
"The following :bash:`{}` are implemented in pyCIF so far:".format(
Plugin.plugin_types[plg_type][0][1:]),
"",
".. toctree::",
"",
] + [
" {}".format(plg) for plg in package_index
] + (
".. role:: bash(code)",
" :language: bash",
"",
"",
len(title) * "#",
title,
len(title) * "#",
""] + ([
".. contents:: Contents",
" :local:",
""
] if import_package.__doc__ is not None else []) + [
"Available {}".format(title),
(len(title) + 11) * "=",
""]
# If only one sub-type, just create an index of all available plugins
if len(Plugin.plugin_subtypes[plg_type]) == 1:
towrite.extend([
"The following :bash:`{}` are implemented in pyCIF so far:".format(
Plugin.plugin_types[plg_type][0][1:]),
"",
".. toctree::",
"",
] + [" {}".format(plg) for plg in package_subindex])
# If sub-types create an index pointing to sub-types and plugins
else:
towrite.extend([
"The following sub-types and :bash:`{}` are implemented "
"in pyCIF so far:".format(
Plugin.plugin_types[plg_type][0][1:]),
"",
".. toctree::",
"",
] + [" {}/index".format(Plugin.plugin_subtypes[plg_type][subtype][1:])
for subtype in Plugin.plugin_subtypes[plg_type]
])
# Append overall type description
towrite.extend(
import_package.__doc__.split('\n')
if import_package.__doc__ is not None
else []
)
else [])
# Dump the string to the rst file
with open("{}/index.rst".format(plg_type_dir), "w") as f:
f.write("\n".join(towrite))
# Dump the overall index
with open("{}/index.rst".format(plg_dir), "w") as f:
f.write("\n".join(towrite_overall_index))
# Generate available list
s = StringIO()
Plugin.print_registered(print_rst=True, print_requirement=True, stream=s)
......
#############################
Developments around CHIMERE
############################
#############################
.. role:: bash(code)
:language: bash
......
@@ -27,7 +27,7 @@ Example: for a CTM with emitted species,
.. code-block:: python
emis = {
("fluxes", s): dict_surface
("flux", s): dict_surface
for s in model.chemistry.emis_species.attributes
}
......
.. role:: bash(code)
:language: bash
.. currentmodule:: pycif.plugins.fields.bc_plugin_template
.. currentmodule:: pycif.plugins.datastreams.fields.bc_plugin_template
Run pycif with this yaml: the new plugin will simply perform what is in the template, i.e. print some instructions on what you have to do and where. The corresponding code must be developed in the places matching the instructions, and checked. To check that each new piece of code works as intended, run the CIF with the yaml using the new plugin, and with the same yaml but using a known plugin with print statements. The scripts have to be developed in this order:
......
@@ -2,9 +2,6 @@
:language: bash
Have a yaml file ready with a simulation that works with known plugins.
For the :doc:`obsoperator</documentation/plugins/obsoperators/index>`,
choose the optional argument :bash:`onlyinit` so that only the inputs are computed,
not the whole simulation XXXX CHECK THIS OPTION ACTUALLY DOES THIS XXXX.
.. code-block:: yaml
......
@@ -47,7 +47,7 @@ XXXXXXX what about the input arguments? They call for a dedicated section!? XXXXXXX
Template plugin for BCs
########################
.. automodule:: pycif.plugins.fields.bc_plugin_template
.. automodule:: pycif.plugins.datastreams.fields.bc_plugin_template
c) add the reference to the rst file in docs/source/documentation/plugins/fields/index.rst:
......
@@ -5,76 +5,348 @@ How to add a new type of flux data to be processed by the CIF into a model's inputs
.. role:: bash(code)
:language: bash
0. .. include:: ../newBCdata/knownplugin.rst
Pre-requisites
================
Before starting to implement a new flux plugin, you must have:
1. In directory :bash:`plugins/fluxes`, copy the directory containing the template for a flux plugin :bash:`flux_plugin_template` in the directory for your new plugin.
- a yaml file ready with a simulation that works with known plugins.
- a folder where the data you want to implement is stored
- basic information about that data (licensing, format, etc.)
.. include:: ../newBCdata/register.rst
We guide you below through the different documentation pages needed to implement your plugin.
The main reference pages are :doc:`the datastream documentation page </documentation/plugins/datastreams/index>`
and :doc:`the flux template documentation page</documentation/plugins/datastreams/fluxes/flux_plugin_template>`.
Switch from working fluxes to the reference template
=====================================================
2. Modify the yaml file to use the new plugin: the minimum input arguments are :bash:`dir`, :bash:`file`, :bash:`varname` and :bash:`unit_conversion`. The default space and time interpolations will be applied (see XXXX doc on the first forward simulation with an example yaml, once updated XXXX).
The :bash:`datavect` paragraph of your working yaml should look like this:
.. code-block:: yaml
.. container:: toggle
components:
fluxes:
.. container:: header
Example with CHIMERE
.. code-block:: yaml
:linenos:
datavect:
plugin:
name: fluxes
version: template
type: fluxes
dir: dir_with_original_files/
file: file_with_new_fluxes_to_use_as_inputs
varname: NAMEORIG
unit_conversion:
scale: 1.
name: standard
version: std
components:
flux:
parameters:
CO2:
plugin:
name: CHIMERE
type: flux
version: AEMISSIONS
file_freq: 120H
dir: some_dir
file: some_file
Do the following to make it work with the template flux:
1. follow the initial steps in :doc:`the flux template documentation page</documentation/plugins/datastreams/fluxes/flux_plugin_template>`
to initialize your new plugin and register it.
This includes copying the template folder to a new path and changing the variables
:bash:`_name`, :bash:`_fullname` and :bash:`_version` in the file :bash:`__init__.py` (see the sketch after this list).
2. update your yaml to use the template flux (renamed to your preferred name). It should now look like this:
.. container:: toggle
.. container:: header
Show/Hide Code
.. code-block:: yaml
:linenos:
datavect:
plugin:
name: standard
version: std
components:
flux:
parameters:
CO2:
plugin:
name: your_new_name
type: flux
version: your_version
3. run your test case again; it should generate fluxes with random values, as in the template.
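As an indicative sketch of the registration step in item 1 (the values are placeholders; the full set of expected module attributes is shown in the template itself), editing the top of the new plugin's :bash:`__init__.py` amounts to:

.. code-block:: python

    # your_new_plugin/__init__.py -- placeholder values, adapt to your case
    _name = "your_new_name"                 # "name" used in the yaml
    _version = "your_version"               # "version" used in the yaml
    _fullname = "My new flux data stream"   # long name displayed in the docs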
Document your plugin
====================
Before going further, be sure to document your plugin properly.
To do so, replace the docstring header in the file :bash:`__init__.py`
(a sketch is given after the list below), including the following information:
- licensing information
- permanent link to download the data (or a contact person if no link is publicly available)
- data format (temporal and horizontal resolution, names and shape of the data files)
- any specific treatment that prevents the plugin from working with another type of files.
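A possible skeleton for this docstring header (all content below is illustrative):

.. code-block:: python

    """
    Fluxes from <name of the data product>.

    Licensing: <licensing information>
    Download: <permanent link, or a contact person if none is public>
    Format: <temporal and horizontal resolution, names and shape of the files>
    Caveats: <any specific treatment preventing use with other types of files>
    """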
Build and check the documentation
=================================
Before going further, please compile the documentation and check that your new plugin
appears in the list of datastreams plugins :doc:`here</documentation/plugins/datastreams/index>`.
Also check that the documentation of your new plugin is satisfactory.
To compile the documentation, use the command:
.. code-block:: bash
cd $CIF_root/docs
make html
Further details can be found :doc:`here</contrib_doc>`.
Updating functions and data to implement your flux data
=======================================================
Your new plugin needs several functions to be implemented in order to work.
fetch
------
The :bash:`fetch` function determines what files and corresponding dates are available
for running the present case.
The structure of the :bash:`fetch` function is shown here: :ref:`datastreams-fetch-funtions`.
Please read all the explanations therein carefully before starting to implement your case.
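As a purely schematic illustration (the argument names and return structure below are assumptions for readability, not the actual pyCIF signature, which is specified in the reference page above), :bash:`fetch` typically scans the :bash:`dir`/:bash:`file` patterns and maps simulation dates to the files covering them:

.. code-block:: python

    # Schematic sketch only; names and return structure are illustrative.
    import glob

    def fetch(ref_dir, ref_file, input_dates, target_dir, **kwargs):
        # List the available input files matching dir/file from the yaml
        available = sorted(glob.glob(ref_dir + ref_file))
        # Associate each simulation period with the file(s) covering it
        return {date: available for date in input_dates}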
By default, the :bash:`fetch` function will use the arguments :bash:`dir` and :bash:`file` in your yaml.
Make sure to update your yaml accordingly:
.. container:: toggle
.. container:: header
Show/Hide Code
.. code-block:: yaml
:linenos:
datavect:
plugin:
name: standard
version: std
components:
flux:
parameters:
CO2:
plugin:
name: your_new_name
type: flux
version: your_version
dir: path_to_data
file: file_name
Depending on how you implement your data stream, extra parameters may be needed.
Please document them as you go in the :bash:`input_arguments` variable in :bash:`__init__.py`.
One classical parameter is :bash:`file_freq`, which gives the frequency of the input files
(independently of the simulation to be computed).
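As a hedged sketch (the exact schema of :bash:`input_arguments` is given in the template's :bash:`__init__.py`; the keys below are illustrative), such a parameter could be documented as:

.. code-block:: python

    input_arguments = {
        "file_freq": {
            "doc": "Frequency of the input files, e.g., 120H",
            "default": "1D",     # illustrative default
            "accepted": str,
        },
    }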
Once implemented, re-run your test case.
You can verify that everything went as expected by checking: