Commit 7b10c2b3 authored by Antoine Berchet's avatar Antoine Berchet
Browse files

New classes fixed for documentation and initialization

parent c7486ef1
......@@ -318,7 +318,6 @@ def process_pycif_keywords(app, what, obj_name, obj, options, lines):
- default_values
- mandatory_values
"""
ref_lines = copy.deepcopy(lines)
# Adding bash highlight by default
......@@ -403,9 +402,30 @@ def process_pycif_keywords(app, what, obj_name, obj, options, lines):
preftree = key_req.get("preftree", "")
empty = key_req.get("empty", False)
name = key_req.get("name", None)
version = key_req.get("version", "")
plg_type = Plugin.plugin_types[key_req.get("type", req)][1]
plg_path = Plugin.plugin_types[key_req.get("type", req)][0][1:]
version = key_req.get("version", None)
req_type = key_req.get("type", req)
req_subtype = key_req.get("subtype", "")
# Load required plugin to deal with types and sub-types
plg_req = Plugin.from_dict({
"plugin": {
"name": name,
"version": version,
"type": req_type,
"subtype": req_subtype
}
})
plg_req._load_plugin_type(req)
plg_type = \
Plugin.plugin_types[plg_req.plugin.type][1]
plg_path = \
Plugin.plugin_types[plg_req.plugin.type][0][1:]
plg_subtype = \
Plugin.plugin_subtypes[
plg_req.plugin.type][
plg_req.plugin.subtype][1:]
# String to dump
newplg = key_req.get("newplg", False)
towrite.extend((
" * - {}\n"
......@@ -534,7 +554,7 @@ def build_rst_from_plugins(app):
init_dir(plg_dir)
# Initialize index
towrite = [
towrite_overall_index = [
"##################",
"Plugins in pyCIF",
"##################",
......@@ -544,12 +564,7 @@ def build_rst_from_plugins(app):
"",
" ../plugin_description",
" ../dependencies"
] + [
" {}/index".format(Plugin.plugin_types[plg_type][0][1:])
for plg_type in Plugin.plugin_types
]
with open("{}/index.rst".format(plg_dir), "w") as f:
f.write("\n".join(towrite))
# Loop on all plugin types
for plg_type in Plugin.plugin_types:
......@@ -559,7 +574,7 @@ def build_rst_from_plugins(app):
Plugin.plugin_types[plg_type][0])
class_module = pkgutil.importlib.import_module(class_path)
local_class = getattr(class_module, class_name)
# Create directory
plg_type_dir = "{}/{}".format(
plg_dir, Plugin.plugin_types[plg_type][0][1:])
......@@ -567,29 +582,79 @@ def build_rst_from_plugins(app):
# Loop over modules of this class
package_path = "pycif.plugins{}".format(Plugin.plugin_types[plg_type][0])
if pkgutil.importlib.util.find_spec(package_path) is None:
continue
# Update overall index
towrite_overall_index.append(
" {}/index".format(Plugin.plugin_types[plg_type][0][1:]))
# Loop over sub-types
import_package = pkgutil.importlib.import_module(package_path)
package_index = []
for mod in pkgutil.walk_packages(import_package.__path__,
prefix=import_package.__name__ + "."):
if not mod.ispkg:
continue
for subtype in Plugin.plugin_subtypes[plg_type]:
local_subpackage = "{}{}".format(
package_path,
Plugin.plugin_subtypes[plg_type][subtype])
import_subpackage = pkgutil.importlib.import_module(local_subpackage)
# Create directory
plg_subtype_dir = "{}/{}".format(
plg_type_dir,
Plugin.plugin_subtypes[plg_type][subtype][1:])
init_dir(plg_subtype_dir)
loc_mod = pkgutil.importlib.import_module(mod.name)
# Register modules only when a name is given
if not hasattr(loc_mod, "_name"):
continue
# Loop over modules in the sub-type
package_subindex = []
for mod in pkgutil.walk_packages(import_subpackage.__path__,
prefix=import_subpackage.__name__ + "."):
if not mod.ispkg:
continue
loc_mod = pkgutil.importlib.import_module(mod.name)
# Register modules only when a name is given
if not hasattr(loc_mod, "_name"):
continue
# Create corresponding rst file
file_name = "{}/{}.rst".format(
plg_subtype_dir, loc_mod.__name__.split(".")[-1])
# Create corresponding rst file
file_name = "{}/{}.rst".format(
plg_type_dir, loc_mod.__name__.split(".")[-1])
title = ":bash:`{}` / :bash:`{}`".format(
loc_mod._name, getattr(loc_mod, "_version", "std"))
title = ":bash:`{}` / :bash:`{}`".format(
loc_mod._name, getattr(loc_mod, "_version", "std"))
if hasattr(loc_mod, "_fullname"):
title = "{} ({})".format(loc_mod._fullname, title)
towrite = [
".. role:: bash(code)",
" :language: bash",
"",
"",
len(title) * "#",
title,
len(title) * "#",
"",
".. automodule:: {}".format(loc_mod.__name__)
]
with open(file_name, "w") as f:
f.write("\n".join(towrite))
# Append name for plugin type index
package_subindex.append(loc_mod.__name__.split(".")[-1])
# Sort names
package_subindex.sort()
if hasattr(loc_mod, "_fullname"):
title = "{} ({})".format(loc_mod._fullname, title)
# Write the plugin type index
if subtype == "":
continue
title = list(subtype)
title[0] = title[0].upper()
title = "".join(title)
towrite = [
".. role:: bash(code)",
" :language: bash",
......@@ -598,54 +663,86 @@ def build_rst_from_plugins(app):
len(title) * "#",
title,
len(title) * "#",
""] + ([".. contents:: Contents", " :local:", ""]
if import_subpackage.__doc__ is not None else []) + [
"Available {}".format(title),
(len(title) + 11) * "=",
"",
".. automodule:: {}".format(loc_mod.__name__)
]
with open(file_name, "w") as f:
"The following :bash:`{}` of sub-type {} "
"are implemented in pyCIF so far:".format(
Plugin.plugin_types[plg_type][0][1:],
subtype),
"",
".. toctree::",
"",
] + [
" {}".format(plg) for plg in package_subindex
] + (
import_subpackage.__doc__.split('\n')
if import_subpackage.__doc__ is not None
else []
)
with open("{}/index.rst".format(plg_subtype_dir), "w") as f:
f.write("\n".join(towrite))
# Append name for plugin type index
package_index.append(loc_mod.__name__.split(".")[-1])
# Sort names
package_index.sort()
# Write the plugin type index
title = list(Plugin.plugin_types[plg_type][0][1:])
title[0] = title[0].upper()
title = "".join(title) + " (:bash:`{}`)".format(plg_type)
towrite = [
".. role:: bash(code)",
" :language: bash",
"",
"",
len(title) * "#",
title,
len(title) * "#",
""] + ([
".. contents:: Contents",
" :local:",
""
] if import_package.__doc__ is not None else []) + [
"Available {}".format(title),
(len(title) + 11) * "=",
"",
"The following :bash:`{}` are implemented in pyCIF so far:".format(
Plugin.plugin_types[plg_type][0][1:]),
"",
".. toctree::",
"",
] + [
" {}".format(plg) for plg in package_index
] + (
".. role:: bash(code)",
" :language: bash",
"",
"",
len(title) * "#",
title,
len(title) * "#",
""] + ([
".. contents:: Contents",
" :local:",
""
] if import_package.__doc__ is not None else []) + [
"Available {}".format(title),
(len(title) + 11) * "=",
""]
# If only one sub-type, just create an index of all available plugins
if len(Plugin.plugin_subtypes[plg_type]) == 1:
towrite.extend([
"The following :bash:`{}` are implemented in pyCIF so far:".format(
Plugin.plugin_types[plg_type][0][1:]),
"",
".. toctree::",
"",
] + [" {}".format(plg) for plg in package_subindex])
# If sub-types create an index pointing to sub-types and plugins
else:
towrite.extend([
"The following sub-types and :bash:`{}` are implemented "
"in pyCIF so far:".format(
Plugin.plugin_types[plg_type][0][1:]),
"",
".. toctree::",
"",
] + [" {}/index".format(Plugin.plugin_subtypes[plg_type][subtype][1:])
for subtype in Plugin.plugin_subtypes[plg_type]
])
# Append overall type description
towrite.extend(
import_package.__doc__.split('\n')
if import_package.__doc__ is not None
else []
)
else [])
# Dump the string to the rst file
with open("{}/index.rst".format(plg_type_dir), "w") as f:
f.write("\n".join(towrite))
# Dump the overall index
with open("{}/index.rst".format(plg_dir), "w") as f:
f.write("\n".join(towrite_overall_index))
# Generate available list
s = StringIO()
Plugin.print_registered(print_rst=True, print_requirement=True, stream=s)
......
#############################
Developments around CHIMERE
############################
#############################
.. role:: bash(code)
:language: bash
......
......@@ -27,7 +27,7 @@ Example: for a CTM with emitted species,
.. code-block:: python
emis = {
("fluxes", s): dict_surface
("flux", s): dict_surface
for s in model.chemistry.emis_species.attributes
}
......
.. role:: bash(code)
:language: bash
.. currentmodule:: pycif.plugins.fields.bc_plugin_template
.. currentmodule:: pycif.plugins.datastreams.fields.bc_plugin_template
Run pycif with this yaml: the new plugin will simply perform what is in the template, i.e., print some instructions on what you have to do where. The following codes must be developed in the places matching the instructions - and checked. To check that each new code works as intended, run the CIF with the yaml using the new plugin and with the same yaml but using a known plugin with print statements. The scripts have to be developed in this order:
......
......@@ -47,7 +47,7 @@ XXXXXXX what about the input arguments? Ils demandent une partie dediee!?XXXXXXX
Template plugin for BCs
########################
.. automodule:: pycif.plugins.fields.bc_plugin_template
.. automodule:: pycif.plugins.datastreams.fields.bc_plugin_template
c) add the reference to the rst file in docs/source/documentation/plugins/fields/index.rst:
......
......@@ -49,7 +49,7 @@ XXXXXXX what about the input arguements? Ils demandent une partie dediee!?XXXXXX
Template plugin for fluxes
###########################
.. automodule:: pycif.plugins.fluxes.flux_plugin_template
.. automodule:: pycif.plugins.datastreams.fluxes.flux_plugin_template
c) add the reference to the rst file in docs/source/documentation/plugins/fluxes/index.rst:
......
......@@ -75,7 +75,7 @@ Below is an example of requirements for the :bash:`model` CHIMERE:
"empty": False,
"any": False,
},
"fluxes": {
"flux": {
"name": "CHIMERE",
"version": "AEMISSIONS",
"empty": True,
......
"""
Description
============
The :bash:`datastream` Plugin type includes interfaces to input data for pycif,
with the exception of observations.
It includes the sub-types :bash:`flux`, :bash:`meteo` and :bash:`field`.
It is used for the following purposes:
i) fetching relevant input files for direct use by, e.g., CTMs, only linking to the
original file
ii) reading relevant input files when data manipulation is required, for, e.g.,
defining the control vector, or auxiliary transformations, such as temporal
interpolation or horizontal regridding
iii) writing data from pycif to the corresponding format; this can either be used
when data from pycif needs to be read as input for a CTM, or for sharing data
from pycif with a known standard data format
Required parameters, dependencies and functions
===============================================
"""
\ No newline at end of file
......@@ -54,7 +54,7 @@ requirements = {
"version": "ic",
"empty": True,
"any": False,
"type": "fields",
"type": "field",
"newplg": True,
},
}
......
......@@ -126,11 +126,11 @@ class Plugin(object):
and (k[1] in versions or versions == [])
and (k[2] in types or types == [])
]
names, versions, types = list(zip(*keys))
names, versions, types, subtypes = list(zip(*keys))
modules = [
cls.get_registered(n, v, t)
for n, v, t in zip(names, versions, types)
cls.get_registered(n, v, t, st)
for n, v, t, st in zip(names, versions, types, subtypes)
]
print(
......@@ -146,40 +146,78 @@ class Plugin(object):
else:
print("\t", ":doc:`{}</documentation/plugins/{}/index>`"
.format(tt, rst_type), file=stream)
plg_type_list = sorted([
(n, v, t, mod)
for n, v, t, mod in zip(names, versions, types, modules)
(n, v, t, st, mod)
for n, v, t, st, mod in zip(names, versions, types, subtypes, modules)
if t == tt])
for n, v, t, mod in plg_type_list:
if not print_rst:
print("\t\t- {}, {}".format(n, v), file=stream)
else:
print("\t\t- :doc:`{}, {}</documentation/plugins/{}/{}>`"
.format(n, v, rst_type,
cls.registered[n, v, t].split(".")[-1]), file=stream)
# Print requirements
if print_requirement and hasattr(mod, "requirements"):
print("\t\t\tRequires:", file=stream)
for req in mod.requirements:
mod_req = mod.requirements[req]
name = mod_req.get("name", "")
version = mod_req.get("version", "")
any = mod_req.get("any", False)
empty = mod_req.get("empty", "")
req_type = mod_req.get("type", req)
if not print_rst:
print("\t\t\t\t- {}:".format(req), file=stream)
else:
req_rst_type = cls.plugin_types[req_type][0][1:]
print("\t\t\t\t"
"- :doc:`{}</documentation/plugins/{}/index>`:"
.format(req, req_rst_type), file=stream)
print("\t\t\t\t\t- name: {}".format(name), file=stream)
print("\t\t\t\t\t- version: {}".format(version), file=stream)
print("\t\t\t\t\t- any: {}".format(any), file=stream)
print("\t\t\t\t\t- empty: {}".format(empty), file=stream)
sub_types = sorted(list(set([t[3] for t in plg_type_list])))
for stt in sub_types:
rst_subtype = cls.plugin_subtypes[tt][stt][1:]
indent = "\t\t"
if len(sub_types) > 1:
indent = "\t\t\t"
if not print_rst:
print("\t\t - ", stt, file=stream)
else:
print("\t\t - ", ":doc:`{}</documentation/plugins/{}/{}/index>`"
.format(stt, rst_type, rst_subtype), file=stream)
plg_subtype_list = sorted([
(n, v, t, st, mod)
for n, v, t, st, mod in plg_type_list
if st == stt])
for n, v, t, st, mod in plg_subtype_list:
if not print_rst:
print("{}- {}, {}".format(indent, n, v), file=stream)
else:
print("{}- :doc:`{}, {}</documentation/plugins/{}/{}/{}>`"
.format(indent, n, v, rst_type, rst_subtype,
cls.registered[n, v, t, st].split(".")[-1]),
file=stream)
# Print requirements
if print_requirement and hasattr(mod, "requirements"):
print("{}\tRequires:".format(indent), file=stream)
for req in mod.requirements:
mod_req = mod.requirements[req]
name = mod_req.get("name", None)
version = mod_req.get("version", None)
any = mod_req.get("any", False)
empty = mod_req.get("empty", "")
req_type = mod_req.get("type", req)
req_subtype = mod_req.get("subtype", "")
if not print_rst:
print("{}\t\t- {}:".format(indent, req), file=stream)
else:
plg_req = cls.from_dict({
"plugin": {
"name": name,
"version": version,
"type": req_type,
"subtype": req_subtype
}
})
plg_req._load_plugin_type(req)
req_rst_type = \
cls.plugin_types[plg_req.plugin.type][0][1:]
req_rst_subtype = \
cls.plugin_subtypes[
plg_req.plugin.type][
plg_req.plugin.subtype][1:]
print("{}\t\t"
"- :doc:`{}</documentation/plugins/{}/{}/index>`:"
.format(indent, req,
req_rst_type, req_subtype), file=stream)
print("{}\t\t\t- name: {}".format(indent, name), file=stream)
print("{}\t\t\t- version: {}".format(indent, version), file=stream)
print("{}\t\t\t- any: {}".format(indent, any), file=stream)
print("{}\t\t\t- empty: {}".format(indent, empty), file=stream)
print("\n", file=stream)
......@@ -478,6 +516,15 @@ class Plugin(object):
if name is None and version is None:
if self.is_allowed(plg_type):
return plg_type
else:
matching = [t for t in self.plugin_subtypes
if plg_type in self.plugin_subtypes[t]]
if len(matching) == 1:
self.plugin.subtype = self.plugin.type
self.plugin.type = matching[0]
return self.plugin.type
raise Exception("There is some error in the definition of your Yaml or, if you "
"are a developer, in the new plugin you are designing: \n"
......@@ -489,7 +536,9 @@ class Plugin(object):
" type: {}\n"
" subtype: {}\n\n"
"Available types and subtypes are: \n{}\n"
"Please check spelling in your definition".format(
"Please check spelling in your definition\n\n"
"If you are initializing an empty plugin, please be sure "
"to define it with name = None AND version = None.".format(
key, name, version, plg_type, plg_subtype,
"\n".join(["- {}/{}".format(t, st)
for t in self.plugin_subtypes
......
......@@ -559,9 +559,6 @@ class Setup(Plugin):
# Error in the yaml if reaching this point
else:
print(__file__)
import code
code.interact(local=dict(locals(), **globals()))
raise PluginError(
"Plugin {} ({}/{}/{}) needs a plugin '{}/{}/{}' and an "
"inconsistent one was proposed in the Yaml".format(
......
......@@ -92,7 +92,7 @@ def ref_chimere(tmpdir):
"datavect": {
"plugin": {"name": "standard", "version": "std"},
"components": {
"fluxes": {
"flux": {
"dir": input_dir,
"file": "AEMISSIONS.%Y%m%d%H.3.nc",
"file_freq": "3H",
......
......@@ -48,7 +48,7 @@ def setup_argfrsd(ref_chimere, request):
datavect = config["datavect"]["components"]
# Test different options in fluxes
datavect["fluxes"] = {
datavect["flux"] = {
"dir": input_dir,
"file": "AEMISSIONS_"+domid+".%Y%m%d%H.24.nc",
"file_freq": "24H",
......
......@@ -30,7 +30,7 @@ def setup_argonaut(ref_chimere):
config["datavect"] = {
"plugin": {"name": "standard", "version": "std"},
"components": {
"fluxes": {
"flux": {
"dir": input_dir,
"parameters": {
"CH4": {
......
......@@ -18,11 +18,11 @@ def setup_edgar(setup_melchior):
config["chemistry"]["schemeid"] = "univ.melchior2"
datavect = config["datavect"]["components"]
datavect["fluxes"]["parameters"]["N2O"] = {
datavect["flux"]["parameters"]["N2O"] = {
"plugin": {
"name": "EDGAR",
"version": "v5",
"type": "fluxes"
"type": "flux"
},
"dir": "{}/EMISSIONS/EDGARV5/TOTAL/".format(raw_dir),
"regrid": {"method": "mass-conservation"},
......
......@@ -30,7 +30,7 @@ def setup_eurocom(ref_chimere):
config["datavect"] = {