Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • raven/raven-administration
1 result
Show changes
Commits on Source (3)
Showing
with 1228 additions and 16 deletions
......@@ -3,9 +3,10 @@
All notable changes to this project will be documented in this file.
Changes before version 3.1.0 are not included
## [Latest commit] - 2024-10-25
## [Latest commit]
- Added a notification at the top of the menu when a new version is available
- [2024-12-19] Added experimental generation of G. To enable it, add `?experimental=true` to `attainments` url
- [2024-10-25] Added a notification at the top of the menu when a new version is available
## [3.1.4] - 2024-10-24
......
......@@ -69,7 +69,7 @@ class Management:
# Validations raises an exception if it fails
if row.column_name.upper() == "BEGIN_POSITION" or row.column_name.upper() == "END_POSITION":
pd.to_datetime(self.df[self.df[row.column_name].notna()][row.column_name], format="%Y-%m-%dT%H:%M:%S%Z")
pd.to_datetime(self.df[self.df[row.column_name].notna()][row.column_name], format="%Y-%m-%dT%H:%M:%S%z")
elif row.data_type.startswith("int"):
self.df[self.df[row.column_name].notna()][row.column_name].astype(int)
......
......@@ -9,8 +9,8 @@ class Common:
def validate_dataframe(df_values: DataFrame):
bench = time.perf_counter()
# Validations raises an exception if it fails
df_values["begin_position"] = pd.to_datetime(df_values["begin_position"], format="%Y-%m-%dT%H:%M:%S%Z")
df_values["end_position"] = pd.to_datetime(df_values["end_position"], format="%Y-%m-%dT%H:%M:%S%Z")
df_values["begin_position"] = pd.to_datetime(df_values["begin_position"], format="%Y-%m-%dT%H:%M:%S%z")
df_values["end_position"] = pd.to_datetime(df_values["end_position"], format="%Y-%m-%dT%H:%M:%S%z")
df_values.sampling_point_id.astype(str)
df_values.value = df_values.value.astype(float)
df_values.verification_flag.astype(int)
......
......@@ -15,6 +15,7 @@ class Filling:
for key, values in timeseries:
scaled_value = -9900 if values.scaled_value.iloc[0] != None else None
tz = values.end_position.iloc[0].tz
tz_seconds = tz.utcoffset(values.end_position.iloc[0]).seconds
ts_from_epoch = values.ts_from_epoch.iloc[0] if pd.notna(values.ts_from_epoch.iloc[0]) else None
ts_to_epoch = values.ts_to_epoch.iloc[0] if pd.notna(values.ts_to_epoch.iloc[0]) else None
......@@ -23,7 +24,7 @@ class Filling:
if ts_timestep == -1:
continue
dates = values.end_position.apply(lambda x: x.timestamp()+tz._offset.seconds).unique()
dates = values.end_position.apply(lambda x: x.timestamp()+tz_seconds).unique()
from_time = int(dates.min() if ts_to_epoch == None else dates.min() if ts_to_epoch > dates.min() else ts_to_epoch)
to_time = int(dates.max() if ts_to_epoch == None else dates.max() if ts_to_epoch < dates.max() else ts_from_epoch if ts_from_epoch > dates.max() else dates.max())
......@@ -35,8 +36,8 @@ class Filling:
for m in missing_dates:
v = {
"sampling_point_id": key,
"begin_position": pd.to_datetime(datetime.fromtimestamp(m-ts_timestep-tz._offset.seconds, tz=tz)), # pd.to_datetime(datetime.fromtimestamp(m-ts_timestep, tz=tz)),
"end_position": pd.to_datetime(datetime.fromtimestamp(m-tz._offset.seconds, tz=tz)),
"begin_position": pd.to_datetime(datetime.fromtimestamp(m-ts_timestep-tz_seconds, tz=tz)), # pd.to_datetime(datetime.fromtimestamp(m-ts_timestep, tz=tz)),
"end_position": pd.to_datetime(datetime.fromtimestamp(m-tz_seconds, tz=tz)),
"value": -9900,
"verification_flag": 3,
"validation_flag": -1,
......
......@@ -57,8 +57,8 @@ class Flagging:
cursor.execute(sql, {"id": sampling_point_id, "dt_from": dt_from, "value": value})
row = cursor.fetchone()
if row != None:
row["begin_position"] = pd.to_datetime(row["begin_position"], format="%Y-%m-%dT%H:%M:%S%Z")
row["end_position"] = pd.to_datetime(row["end_position"], format="%Y-%m-%dT%H:%M:%S%Z")
row["begin_position"] = pd.to_datetime(row["begin_position"], format="%Y-%m-%dT%H:%M:%S%z")
row["end_position"] = pd.to_datetime(row["end_position"], format="%Y-%m-%dT%H:%M:%S%z")
return row
@staticmethod
......
......@@ -70,9 +70,13 @@ class Importing:
if len(df_values[df_values["ts_is_calculated"] == True]) > 0:
raise Exception("Calculated values cannot be imported")
# Check to see if timestep matches with the imported values. Ignore if timestep is 1
if not (((df_values.end_position - df_values.begin_position) / pd.Timedelta(seconds=1) == (df_values.apply(lambda x: U.actual_timestep(x.begin_position, x.ts_timestep), axis=1)))).all() and not (df_values.ts_timestep == -1).all():
raise Exception("The difference between end_position and begin_position must be the same as the samplingpoint timestep")
# Check to see if timestep matches with the imported values. Ignore if timestep is -1
df_errors = df_values[(((df_values.end_position - df_values.begin_position) / pd.Timedelta(seconds=1)).astype('int64') - ((df_values.apply(lambda x: U.actual_timestep(x.begin_position, x.ts_timestep), axis=1))).astype('int64')) > 0]
df_errors = df_errors[df_errors.ts_timestep != -1]
if len(df_errors) > 0:
l = len(df_errors)
f = df_errors.iloc[0]
raise Exception(f"Imported values does not match timestep. First error is at {f.sampling_point_id} {f.begin_position} {f.end_position} {f.ts_timestep}")
# Check if any are verified values
# if len(df_values[df_values["verification_flag"] == 1]) > 0:
......
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
import pandas as pd
from core.eea.generate_attainment.directives.common import get_annual_coverage, get_limitvalue
def get_alt(directive, regime, year):
    """Evaluate an alert-threshold (ALT) attainment regime for one year.

    Aggregates means for the regime's sampling points, keeps sampling points
    with sufficient coverage (or an exceedance count above the directive's
    threshold), and reports the maximum exceedance count.

    directive: mapping with "mean_type", "count" and limit-value fields.
    regime: mapping with "samplingpoints"; mutated with "used_samplingpoints".
    Returns {"regime", "value", "exceedance_type", "has_exceedances"}.
    """
    fromtime = str(year) + "-01-01"
    totime = str(year + 1) + "-01-01"
    coverage = 75
    fraction = 10
    comparingFraction = 0
    meantype = MeanType(directive["mean_type"])
    limitvalue = get_limitvalue(directive)
    exceedance_type = 1
    with CursorFromPool() as cursor:
        meanvalues = Mean.Aggregate(cursor, meantype, tuple(regime["samplingpoints"]), fromtime, totime, coverage, 3, fraction, True)
        coverages_and_count_and_max = get_coverages_and_count_and_max(cursor, year, pd.DataFrame(meanvalues), limitvalue, comparingFraction)
        if len(coverages_and_count_and_max) > 0:
            df_with_coverage_or_count = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= 85) | (coverages_and_count_and_max["count"] > directive["count"])]
            if df_with_coverage_or_count.empty:
                return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
            regime["used_samplingpoints"] = list(df_with_coverage_or_count["sampling_point_id"].unique())
            cnt = df_with_coverage_or_count["count"].max().item()
            # ALT directives compare the exceedance count, not the max value.
            has_exceedances = cnt > directive["count"] if directive["count"] is not None else False
            return {"regime": regime, "value": cnt, "exceedance_type": exceedance_type, "has_exceedances": has_exceedances}
    return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
def get_coverages_and_count_and_max(cursor, year, df, limitvalue, factor):
    """Per sampling point: annual coverage, exceedance count and max value."""
    grouped = df.groupby("sampling_point_id")["value"]
    counts = grouped.apply(lambda vals: (round(vals, factor) > limitvalue).sum()).reset_index(name="count")
    max_values = grouped.max().reset_index(name="max_value")
    sampling_points = tuple(df["sampling_point_id"].unique())
    coverage_df = get_annual_coverage(cursor, sampling_points, year)
    result = pd.merge(coverage_df, counts, on="sampling_point_id")
    return pd.merge(result, max_values, on="sampling_point_id")
import pandas as pd
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
from core.eea.generate_attainment.directives.common import get_annual_coverage, get_pre_coverage, get_limitvalue
def get_cl(directive, regime, year):
    """Evaluate a critical-level (CL) attainment regime for one year.

    Aggregates means for the regime's sampling points, keeps sampling points
    with sufficient coverage (or count above the directive's threshold), and
    compares the observed maximum against the critical level.

    regime is mutated with "used_samplingpoints".
    Returns {"regime", "value", "exceedance_type", "has_exceedances"}.
    """
    fromtime = str(year) + "-01-01"
    totime = str(year + 1) + "-01-01"
    limitvalue = get_limitvalue(directive)
    exceedance_type = 2
    agg_coverage = 85
    coverage = 85
    fraction = 10
    comparingFraction = 0
    meantype = MeanType(directive["mean_type"])
    with CursorFromPool() as cursor:
        meanvalues = Mean.Aggregate(cursor, meantype, tuple(regime["samplingpoints"]), fromtime, totime, agg_coverage, 3, fraction, True)
        coverages_and_count_and_max = get_coverages_and_count_and_max(cursor, year, pd.DataFrame(meanvalues), limitvalue, comparingFraction, directive)
        if len(coverages_and_count_and_max) > 0:
            df_with_coverage = coverages_and_count_and_max[coverages_and_count_and_max["coverage"] >= coverage]
            df_with_coverage_or_count = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= coverage) | (coverages_and_count_and_max["count"] > directive["count"])]
            if df_with_coverage_or_count.empty:
                return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
            regime["used_samplingpoints"] = list(df_with_coverage_or_count["sampling_point_id"].unique())
            # CL directives compare the observed maximum against the critical level.
            mx = df_with_coverage["max_value"].max().item()
            has_exceedances = mx > limitvalue
            return {"regime": regime, "value": mx, "exceedance_type": exceedance_type, "has_exceedances": has_exceedances}
    return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
def get_coverages_and_count_and_max(cursor, year, df, limitvalue, factor, directive):
    """Per sampling point and year: coverage, exceedance count and max value.

    df: aggregated mean values with string "datetime", "value" and
    "sampling_point_id" columns; a "year" column is added in place.
    """
    df["year"] = df.datetime.str[:4].astype(int)
    spos = list(df["sampling_point_id"].unique())
    counts = df.groupby(['sampling_point_id', 'year'])['value'].apply(lambda x: (round(x, factor) > limitvalue).sum()).reset_index(name='count')
    values = df.groupby(['sampling_point_id', 'year'])["value"].max().reset_index(name='max_value')
    # O3 wMean reuses the coverage already carried by the aggregated values;
    # everything else derives coverage from the annual means.
    if directive["pollutant"] == "O3" and directive["reportingmetric"] == "wMean":
        df = get_pre_coverage(pd.DataFrame(df))
    else:
        df = get_annual_coverage(cursor, tuple(spos), year)
    merged_df = pd.merge(df, counts, on=['sampling_point_id', 'year'])
    merged_df = pd.merge(merged_df, values, on=['sampling_point_id', 'year'])
    return merged_df
import pandas as pd
from core.data.mean import Mean, MeanType
def get_annual_coverage(cursor, spos, year):
    """Return per-sampling-point annual coverage.

    Result columns: sampling_point_id, coverage, year (empty frame when the
    aggregation yields no values).
    """
    fromtime = str(year) + "-01-01"
    totime = str(year + 1) + "-01-01"
    meanvalues = Mean.Aggregate(cursor, MeanType.Year, spos, fromtime, totime, 0, 3, 3, False)
    if len(meanvalues) == 0:
        return pd.DataFrame(columns=["sampling_point_id", "coverage", "year"])
    result = pd.DataFrame(meanvalues)
    result["year"] = year
    return result[["sampling_point_id", "coverage", "year"]]
def get_summer_winter_o3_coverage(df_meanvalues):
    """Per sampling point and year, compute the summer/winter O3 coverage flag."""
    df_meanvalues["year"] = df_meanvalues.datetime.str[:4].astype(int)
    rows = [
        {
            "sampling_point_id": spo,
            "coverage": get_summer_winter_coverage(group, int(yr)),
            "year": int(yr),
        }
        for (spo, yr), group in df_meanvalues.groupby(["sampling_point_id", "year"])
    ]
    return pd.DataFrame(rows)
def get_pre_coverage(df_meanvalues):
    """Pass through the coverage columns already present on the mean values."""
    wanted_columns = ["sampling_point_id", "coverage", "year"]
    return df_meanvalues.loc[:, wanted_columns]
def get_summer_winter_coverage(df, year):
    """Return 100 if winter coverage >= 70% and summer coverage >= 85%, else 0.

    df: daily rows with a string "datetime" column ("YYYY-MM-DD...") and a
    numeric "value" column. Rows with a missing value do not count.
    """
    from calendar import isleap  # stdlib replacement for the hand-rolled leap check
    winter_months = ["01", "02", "03", "10", "11", "12"]
    summer_months = ["04", "05", "06", "07", "08", "09"]
    days_in_winter = 183 if isleap(year) else 182
    days_in_summer = 183
    new_cov = 0
    # BUG FIX: "df[df.value != None]" is an elementwise comparison that keeps
    # every row (NaN != None is True), so missing values were never dropped.
    df = df[df.value.notna()]
    months = df.datetime.str[5:7]
    no_of_days_winter = len(df[months.isin(winter_months)])
    no_of_days_summer = len(df[months.isin(summer_months)])
    if no_of_days_winter > 0 and no_of_days_summer > 0:
        winter_coverage = no_of_days_winter / days_in_winter * 100
        summer_coverage = no_of_days_summer / days_in_summer * 100
        new_cov = 100 if round(winter_coverage) >= 70 and round(summer_coverage) >= 85 else 0
    return new_cov
def is_leap_year(year):
    """Gregorian leap-year test: divisible by 4 but not 100, or by 400."""
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
def get_limitvalue(directive):
    """Return the directive's limit value as an int.

    Falls back from "value" to "vegetation_value" to "eco_value"; 0 when all
    three are missing.

    NOTE(review): int() truncates fractional limits (e.g. Decimal('9.3') -> 9,
    Decimal('0.5') -> 0) — confirm truncation is intended for those directives.
    """
    for key in ("value", "vegetation_value", "eco_value"):
        val = directive[key]
        if val is not None:
            return int(val)
    return 0
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
import pandas as pd
from core.eea.generate_attainment.directives.common import get_annual_coverage, get_limitvalue
def get_eco(directive, regime, year):
    """Evaluate an ecosystem (ECO) attainment regime.

    Uses a three-year window (year-2 .. year), collapses all sampling points
    into one averaged series via aggregate_all, and compares the maximum
    against the limit value.

    regime is mutated with "used_samplingpoints".
    Returns {"regime", "value", "exceedance_type", "has_exceedances"}.
    """
    fromtime = str(year - 2) + "-01-01"  # three-year averaging window
    totime = str(year + 1) + "-01-01"
    coverage = 85
    fraction = 10
    comparingFraction = 0
    meantype = MeanType(directive["mean_type"])
    limitvalue = get_limitvalue(directive)
    exceedance_type = 2
    with CursorFromPool() as cursor:
        meanvalues = Mean.Aggregate(cursor, meantype, tuple(regime["samplingpoints"]), fromtime, totime, coverage, 3, fraction, True)
        meanvalues = aggregate_all(meanvalues)
        coverages_and_count_and_max = get_coverages_and_count_and_max(cursor, year, pd.DataFrame(meanvalues), limitvalue, comparingFraction)
        if len(coverages_and_count_and_max) > 0:
            df_with_coverage = coverages_and_count_and_max[coverages_and_count_and_max["coverage"] >= 85]
            df_with_coverage_or_count = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= 85) | (coverages_and_count_and_max["count"] > directive["count"])]
            if df_with_coverage_or_count.empty:
                return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
            regime["used_samplingpoints"] = list(df_with_coverage_or_count["sampling_point_id"].unique())
            # ECO directives compare the observed maximum against the limit value.
            mx = df_with_coverage["max_value"].max().item()
            has_exceedances = mx > limitvalue
            return {"regime": regime, "value": mx, "exceedance_type": exceedance_type, "has_exceedances": has_exceedances}
    return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
def get_coverages_and_count_and_max(cursor, year, df, limitvalue, factor):
    """Per sampling point: annual coverage, exceedance count and max value."""
    per_point_values = df.groupby("sampling_point_id")["value"]
    exceedances = per_point_values.apply(lambda vals: (round(vals, factor) > limitvalue).sum()).reset_index(name="count")
    maxima = per_point_values.max().reset_index(name="max_value")
    coverage_df = get_annual_coverage(cursor, tuple(df["sampling_point_id"].unique()), year)
    merged = pd.merge(coverage_df, exceedances, on="sampling_point_id")
    return pd.merge(merged, maxima, on="sampling_point_id")
def aggregate_all(meanvalues):
    """Collapse per-sampling-point means into a single averaged record.

    Averages the values per datetime across sampling points, then averages
    those per-datetime means and rounds to one decimal. Returns one record
    keyed to the max sampling_point_id / datetime, with full coverage.

    The original built a whole aggregated frame with constant-valued lambdas
    but only ever read the value column — that dead work is removed.
    """
    df = pd.DataFrame(meanvalues)
    # Mean of the per-datetime means across all sampling points.
    value = round(df.groupby("datetime")["value"].mean().mean(), 1)
    return [{
        "sampling_point_id": df["sampling_point_id"].max(),
        "datetime": df["datetime"].max(),
        "value": value,
        "cnt": 1,
        "coverage": 100,
    }]
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
import pandas as pd
from core.eea.generate_attainment.directives.common import get_annual_coverage, get_limitvalue
def get_ert(directive, regime, year):
    """Evaluate an exposure-reduction-target (ERT) attainment regime.

    Uses a three-year window (year-2 .. year), collapses all sampling points
    into one averaged series via aggregate_all, and compares the maximum
    against the limit value.

    regime is mutated with "used_samplingpoints".
    Returns {"regime", "value", "exceedance_type", "has_exceedances"}.
    """
    fromtime = str(year - 2) + "-01-01"  # three-year averaging window
    totime = str(year + 1) + "-01-01"
    coverage = 85
    fraction = 10
    comparingFraction = 0
    meantype = MeanType(directive["mean_type"])
    limitvalue = get_limitvalue(directive)
    exceedance_type = 2
    with CursorFromPool() as cursor:
        meanvalues = Mean.Aggregate(cursor, meantype, tuple(regime["samplingpoints"]), fromtime, totime, coverage, 3, fraction, True)
        meanvalues = aggregate_all(meanvalues)
        coverages_and_count_and_max = get_coverages_and_count_and_max(cursor, year, pd.DataFrame(meanvalues), limitvalue, comparingFraction)
        if len(coverages_and_count_and_max) > 0:
            df_with_coverage = coverages_and_count_and_max[coverages_and_count_and_max["coverage"] >= 85]
            df_with_coverage_or_count = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= 85) | (coverages_and_count_and_max["count"] > directive["count"])]
            if df_with_coverage_or_count.empty:
                return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
            regime["used_samplingpoints"] = list(df_with_coverage_or_count["sampling_point_id"].unique())
            # ERT directives compare the observed maximum against the limit value.
            mx = df_with_coverage["max_value"].max().item()
            has_exceedances = mx > limitvalue
            return {"regime": regime, "value": mx, "exceedance_type": exceedance_type, "has_exceedances": has_exceedances}
    return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
def get_coverages_and_count_and_max(cursor, year, df, limitvalue, factor):
    """Per sampling point: annual coverage, exceedance count and max value."""
    values_by_point = df.groupby("sampling_point_id")["value"]
    count_frame = values_by_point.apply(lambda v: (round(v, factor) > limitvalue).sum()).reset_index(name="count")
    max_frame = values_by_point.max().reset_index(name="max_value")
    points = tuple(df["sampling_point_id"].unique())
    coverage_frame = get_annual_coverage(cursor, points, year)
    combined = pd.merge(coverage_frame, count_frame, on="sampling_point_id")
    combined = pd.merge(combined, max_frame, on="sampling_point_id")
    return combined
def aggregate_all(meanvalues):
    """Collapse per-sampling-point means into a single averaged record.

    Averages the values per datetime across sampling points, then averages
    those per-datetime means and rounds to one decimal. Returns one record
    keyed to the max sampling_point_id / datetime, with full coverage.

    The original built a whole aggregated frame with constant-valued lambdas
    but only ever read the value column — that dead work is removed.
    """
    df = pd.DataFrame(meanvalues)
    # Mean of the per-datetime means across all sampling points.
    value = round(df.groupby("datetime")["value"].mean().mean(), 1)
    return [{
        "sampling_point_id": df["sampling_point_id"].max(),
        "datetime": df["datetime"].max(),
        "value": value,
        "cnt": 1,
        "coverage": 100,
    }]
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
from core.eea.generate_attainment.directives.common import get_annual_coverage, get_limitvalue
import pandas as pd
def get_int(directive, regime, year):
    """Evaluate an information-threshold (INT) attainment regime for one year.

    Aggregates means for the regime's sampling points, keeps sampling points
    with sufficient coverage (or an exceedance count above the directive's
    threshold), and reports the maximum exceedance count.

    regime is mutated with "used_samplingpoints".
    Returns {"regime", "value", "exceedance_type", "has_exceedances"}.
    """
    fromtime = str(year) + "-01-01"
    totime = str(year + 1) + "-01-01"
    coverage = 75
    fraction = 10
    comparingFraction = 0
    meantype = MeanType(directive["mean_type"])
    limitvalue = get_limitvalue(directive)
    exceedance_type = 1
    with CursorFromPool() as cursor:
        meanvalues = Mean.Aggregate(cursor, meantype, tuple(regime["samplingpoints"]), fromtime, totime, coverage, 3, fraction, True)
        coverages_and_count_and_max = get_coverages_and_count_and_max(cursor, year, pd.DataFrame(meanvalues), limitvalue, comparingFraction)
        if len(coverages_and_count_and_max) > 0:
            df_with_coverage_or_count = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= 85) | (coverages_and_count_and_max["count"] > directive["count"])]
            if df_with_coverage_or_count.empty:
                return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
            regime["used_samplingpoints"] = list(df_with_coverage_or_count["sampling_point_id"].unique())
            cnt = df_with_coverage_or_count["count"].max().item()
            # INT directives compare the exceedance count, not the max value.
            has_exceedances = cnt > directive["count"] if directive["count"] is not None else False
            return {"regime": regime, "value": cnt, "exceedance_type": exceedance_type, "has_exceedances": has_exceedances}
    return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
def get_coverages_and_count_and_max(cursor, year, df, limitvalue, factor):
    """Per sampling point: annual coverage, exceedance count and max value."""
    by_spo = df.groupby("sampling_point_id")["value"]
    counts = by_spo.apply(lambda vals: (round(vals, factor) > limitvalue).sum()).reset_index(name="count")
    maxima = by_spo.max().reset_index(name="max_value")
    spos = tuple(df["sampling_point_id"].unique())
    coverage = get_annual_coverage(cursor, spos, year)
    return pd.merge(pd.merge(coverage, counts, on="sampling_point_id"), maxima, on="sampling_point_id")
from collections import OrderedDict
from decimal import Decimal
# EEA reporting limit values, one record per (pollutant, reporting metric,
# objective type). Field order matters downstream, so records stay OrderedDicts.
_URI = 'http://dd.eionet.europa.eu/vocabulary/aq/pollutant/'
_FIELDS = ('pollutant_uri', 'pollutant', 'mean_type', 'limitvalue_type', 'value',
           'count', 'vegetation_value', 'eco_value', 'reportingmetric', 'objectivetype')


def _limitvalue(code, pollutant, mean_type, limitvalue_type, value, count,
                vegetation_value, eco_value, reportingmetric, objectivetype):
    """Build one limit-value record with the canonical field order."""
    return OrderedDict(zip(_FIELDS, (_URI + code, pollutant, mean_type,
                                     limitvalue_type, value, count,
                                     vegetation_value, eco_value,
                                     reportingmetric, objectivetype)))


limitvalues = [
    _limitvalue('6001', 'PM2.5', 4, 5, Decimal('25'), None, None, None, 'aMean', 'TV'),
    _limitvalue('14', 'Cd (aerosol)', 4, 5, Decimal('5'), None, None, None, 'aMean', 'TV'),
    _limitvalue('6015', 'Benzo(a)pyrene (air+aerosol)', 4, 5, Decimal('1'), None, None, None, 'aMean', 'TV'),
    _limitvalue('5014', 'Cd in PM10 (aerosol)', 4, 5, Decimal('5'), None, None, None, 'aMean', 'TV'),
    _limitvalue('5015', 'Ni in PM10 (aerosol)', 4, 5, Decimal('20'), None, None, None, 'aMean', 'TV'),
    _limitvalue('18', 'As (aerosol)', 4, 5, Decimal('6'), None, None, None, 'aMean', 'TV'),
    _limitvalue('5018', 'As in PM10 (aerosol)', 4, 5, Decimal('6'), None, None, None, 'aMean', 'TV'),
    _limitvalue('5129', 'Benzo(a)pyrene in PM10 (air+aerosol)', 4, 5, Decimal('1'), None, None, None, 'aMean', 'TV'),
    _limitvalue('5029', 'Benzo(a)pyrene in PM10 (aerosol)', 4, 5, Decimal('1'), None, None, None, 'aMean', 'TV'),
    _limitvalue('15', 'Ni (aerosol)', 4, 5, Decimal('20'), None, None, None, 'aMean', 'TV'),
    _limitvalue('7', 'O3', 6, 5, Decimal('120'), 25, None, None, 'daysAbove-3yr', 'TV'),
    _limitvalue('6001', 'PM2.5', 4, 1, Decimal('25'), None, None, None, 'aMean', 'LV'),
    _limitvalue('6001', 'PM2.5', 1, 1, Decimal('888'), None, None, None, 'hrsAbove', 'LV'),
    _limitvalue('1', 'SO2', 1, 1, Decimal('350'), 24, None, None, 'hrsAbove', 'LV'),
    _limitvalue('8', 'NO2', 1, 1, Decimal('200'), 18, None, None, 'hrsAbove', 'LV'),
    _limitvalue('1', 'SO2', 2, 1, Decimal('125'), 3, None, None, 'daysAbove', 'LV'),
    _limitvalue('5', 'PM10', 2, 1, Decimal('50'), 35, None, None, 'daysAbove', 'LV'),
    _limitvalue('5', 'PM10', 4, 1, Decimal('40'), None, None, None, 'aMean', 'LV'),
    _limitvalue('20', 'Benzene', 4, 1, Decimal('5'), None, None, None, 'aMean', 'LV'),
    _limitvalue('8', 'NO2', 4, 1, Decimal('40'), None, None, None, 'aMean', 'LV'),
    _limitvalue('10', 'CO', 6, 1, Decimal('10'), None, None, None, 'daysAbove', 'LV'),
    _limitvalue('7', 'O3', 9, 5, None, None, Decimal('18000'), None, 'AOT40c-5yr', 'TV'),
    _limitvalue('9', 'NOx', 4, 9, None, None, Decimal('30'), None, 'aMean', 'CL'),
    _limitvalue('1', 'SO2', 11, 9, None, None, Decimal('20'), None, 'wMean', 'CL'),
    _limitvalue('1', 'SO2', 4, 9, None, None, Decimal('20'), None, 'aMean', 'CL'),
    _limitvalue('7', 'O3', 1, 12, Decimal('180'), None, None, None, 'hrsAbove', 'INT'),
    _limitvalue('7', 'O3', 1, 7, Decimal('240'), None, None, None, '3hAbove', 'ALT'),
    _limitvalue('6001', 'PM2.5', 4, 11, Decimal('9.3'), None, None, None, 'AEI', 'ERT'),
    _limitvalue('6001', 'PM2.5', 4, 10, Decimal('20'), None, None, None, 'AEI', 'ECO'),
    _limitvalue('7', 'O3', 6, 6, Decimal('120'), None, None, None, 'daysAbove', 'LTO'),
    _limitvalue('7', 'O3', 9, 6, None, None, Decimal('6000'), None, 'AOT40c', 'LTO'),
    _limitvalue('5012', 'Lead in PM10 (aerosol)', 4, 1, Decimal('0.5'), None, None, None, 'aMean', 'LV'),
]
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
import pandas as pd
from itertools import groupby
from core.eea.generate_attainment.directives.common import get_annual_coverage, get_summer_winter_o3_coverage, get_limitvalue, get_pre_coverage
def get_lto(directive, regime, year):
    """Evaluate a long-term objective (LTO) attainment regime for one year.

    Counting directives (directive["count"] set) report the exceedance count;
    otherwise the maximum value is compared against the limit value.

    regime is mutated with "used_samplingpoints".
    Returns {"regime", "value", "exceedance_type", "has_exceedances"}.
    """
    fromtime = str(year) + "-01-01"
    totime = str(year + 1) + "-01-01"
    limitvalue = get_limitvalue(directive)
    use_count = directive["count"] is not None
    exceedance_type = 1 if use_count else 2
    # AOT40c uses stricter coverage requirements than the other metrics.
    agg_coverage = 90 if directive["reportingmetric"] == "AOT40c" else 75
    coverage = 90 if directive["reportingmetric"] == "AOT40c" else 85
    fraction = 10
    comparingFraction = 0
    meantype = MeanType(directive["mean_type"])
    with CursorFromPool() as cursor:
        meanvalues = Mean.Aggregate(cursor, meantype, tuple(regime["samplingpoints"]), fromtime, totime, agg_coverage, 3, fraction, True)
        coverages_and_count_and_max = get_coverages_and_count_and_max(cursor, year, pd.DataFrame(meanvalues), limitvalue, comparingFraction, directive)
        if len(coverages_and_count_and_max) > 0:
            df_with_coverage = coverages_and_count_and_max[coverages_and_count_and_max["coverage"] >= coverage]
            df_with_coverage_or_count = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= coverage) | (coverages_and_count_and_max["count"] > directive["count"])]
            if df_with_coverage_or_count.empty:
                return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
            regime["used_samplingpoints"] = list(df_with_coverage_or_count["sampling_point_id"].unique())
            cnt = df_with_coverage_or_count["count"].max().item()
            mx = df_with_coverage["max_value"].max().item()
            has_exceedances = cnt > directive["count"] if use_count else mx > limitvalue
            value = cnt if use_count else mx
            return {"regime": regime, "value": value, "exceedance_type": exceedance_type, "has_exceedances": has_exceedances}
    return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
def get_coverages_and_count_and_max(cursor, year, df, limitvalue, factor, directive):
    """Join per-(sampling point, year) data coverage with exceedance counts and maxima.

    :param cursor: open DB cursor, forwarded to get_annual_coverage.
    :param year: evaluation year, forwarded to get_annual_coverage.
    :param df: aggregated mean values; expects datetime / sampling_point_id /
        value columns.
    :param limitvalue: threshold a rounded value must exceed to be counted.
    :param factor: number of decimals to round to before comparing.
    :param directive: selects which coverage computation applies.
    :return: DataFrame with sampling_point_id, year, coverage, count, max_value.
    """
    # Guard: with no aggregated values there is nothing to evaluate, and the
    # column accesses below would raise. Callers only check len().
    if df.empty:
        return df
    df["year"] = df.datetime.str[:4].astype(int)
    spos = list(df["sampling_point_id"].unique())
    # Count values above the limit after rounding to `factor` decimals.
    counts = df.groupby(['sampling_point_id', 'year'])['value'].apply(lambda x: (round(x, factor) > limitvalue).sum()).reset_index(name='count')
    values = df.groupby(['sampling_point_id', 'year'])["value"].max().reset_index(name='max_value')
    # Pick the coverage definition matching the metric.
    if directive["pollutant"] == "O3" and directive["reportingmetric"] == "daysAbove":
        df = get_summer_winter_o3_coverage(pd.DataFrame(df))
    elif directive["reportingmetric"] == "AOT40c":
        df = get_pre_coverage(pd.DataFrame(df))
    else:
        df = get_annual_coverage(cursor, tuple(spos), year)
    merged_df = pd.merge(df, counts, on=['sampling_point_id', 'year'])
    merged_df = pd.merge(merged_df, values, on=['sampling_point_id', 'year'])
    # NOTE(review): this branch tests "daysAbove-3yr", a metric the LTO
    # handler above never passes ("daysAbove" / "AOT40c") — it looks copied
    # from the TV variant; confirm whether it is reachable here.
    if directive["pollutant"] == "O3" and directive["reportingmetric"] == "daysAbove-3yr":
        merged_df = merged_df.groupby(["sampling_point_id"]).agg(
            {
                "year": lambda x: x.max(),
                "coverage": lambda x: 100 if x.max() >= 85 else 0,
                "count": lambda x: round(x.mean()),
                "max_value": lambda x: x.max(),
            }
        ).reset_index()
    return merged_df
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
import pandas as pd
from core.eea.generate_attainment.directives.common import get_annual_coverage, get_limitvalue
def get_lv(directive, regime, year):
    """Evaluate a Limit Value (LV) attainment for one assessment regime.

    :param directive: limit-value definition (pollutant, reportingmetric,
        mean_type, count, ...).
    :param regime: assessment regime dict with "samplingpoints"; mutated —
        "used_samplingpoints" is set when data is available.
    :param year: calendar year to evaluate.
    :return: dict with keys regime / value / exceedance_type / has_exceedances.
    """
    fromtime = str(year) + "-01-01"
    totime = str(year + 1) + "-01-01"
    # Annual means require 85 % coverage for aggregation; other metrics 75 %.
    coverage = 85 if directive["reportingmetric"] == "aMean" else 75
    fraction = 10
    # Lead is compared with one decimal; everything else with none.
    comparingFraction = 1 if directive["pollutant"] == "Lead in PM10 (aerosol)" else 0
    meantype = MeanType(directive["mean_type"])
    limitvalue = get_limitvalue(directive)
    # Exceedance type 1 = count-based objective, 2 = max-value-based objective.
    exceedance_type = 1 if directive["count"] is not None else 2
    # (Removed a leftover no-op debugger anchor:
    #  `if regime["name"] == "ARE_NO0001_CO_LV_daysAbove_H": pass`.)
    with CursorFromPool() as cursor:
        meanvalues = Mean.Aggregate(cursor, meantype, tuple(regime["samplingpoints"]), fromtime, totime, coverage, 3, fraction, True)
        coverages_and_count_and_max = get_coverages_and_count_and_max(cursor, year, pd.DataFrame(meanvalues), limitvalue, comparingFraction)
        if len(coverages_and_count_and_max) > 0:
            # NOTE(review): these filters use a hard-coded 85 % rather than the
            # `coverage` variable above — confirm this is intended.
            df_with_coverage = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= 85)]
            df_with_coverage_or_count = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= 85) | (coverages_and_count_and_max["count"] > directive["count"])]
            if df_with_coverage_or_count.empty:
                return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
            regime["used_samplingpoints"] = list(df_with_coverage_or_count["sampling_point_id"].unique())
            use_count = directive["count"] is not None
            cnt = df_with_coverage_or_count["count"].max().item()
            mx = df_with_coverage["max_value"].max().item()
            has_exceedances = cnt > directive["count"] if use_count else mx > limitvalue
            value = cnt if use_count else mx
            return {"regime": regime, "value": value, "exceedance_type": exceedance_type, "has_exceedances": has_exceedances}
    return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
def get_coverages_and_count_and_max(cursor, year, df, limitvalue, factor):
    """Join per-sampling-point annual coverage with exceedance counts and maxima.

    :param cursor: open DB cursor, forwarded to get_annual_coverage.
    :param year: evaluation year, forwarded to get_annual_coverage.
    :param df: aggregated mean values; expects sampling_point_id / value columns.
    :param limitvalue: threshold a rounded value must exceed to be counted.
    :param factor: number of decimals to round to before comparing.
    :return: DataFrame with sampling_point_id, coverage, count, max_value.
    """
    # Guard: with no aggregated values the column accesses below would raise.
    # Callers only check len(), so the empty frame is a safe return value.
    if df.empty:
        return df
    spos = list(df["sampling_point_id"].unique())
    # Count values above the limit after rounding to `factor` decimals.
    counts = df.groupby('sampling_point_id')['value'].apply(lambda x: (round(x, factor) > limitvalue).sum()).reset_index(name='count')
    values = df.groupby("sampling_point_id")["value"].max().reset_index(name='max_value')
    df = get_annual_coverage(cursor, tuple(spos), year)
    merged_df = pd.merge(df, counts, on="sampling_point_id")
    merged_df = pd.merge(merged_df, values, on="sampling_point_id")
    return merged_df
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
import pandas as pd
from itertools import groupby
from core.eea.generate_attainment.directives.common import get_annual_coverage, get_summer_winter_o3_coverage, get_limitvalue, get_pre_coverage
def get_tv(directive, regime, year):
    """Evaluate a Target Value (TV) attainment for one assessment regime.

    Multi-year metrics widen the aggregation window: daysAbove-3yr uses the
    three years and AOT40c-5yr the five years ending with *year* (the latter
    is collapsed to a single record by convert_aot40).

    :param directive: limit-value definition (pollutant, reportingmetric,
        mean_type, count, ...).
    :param regime: assessment regime dict with "samplingpoints"; mutated —
        "used_samplingpoints" is set when data is available.
    :param year: last calendar year of the evaluation window.
    :return: dict with keys regime / value / exceedance_type / has_exceedances.
    """
    fromtime = str(year) + "-01-01"
    totime = str(year + 1) + "-01-01"
    if directive["reportingmetric"] == "daysAbove-3yr":
        fromtime = str(year - 2) + "-01-01"
    if directive["reportingmetric"] == "AOT40c-5yr":
        fromtime = str(year - 4) + "-01-01"
    limitvalue = get_limitvalue(directive)
    # Exceedance type 1 = count-based objective, 2 = max-value-based objective.
    exceedance_type = 1 if directive["count"] is not None else 2
    # Per-metric coverage requirements for aggregation and for the final filter.
    agg_coverage = 75 if directive["reportingmetric"] == "daysAbove-3yr" else 85
    agg_coverage = 90 if directive["reportingmetric"] == "AOT40c-5yr" else agg_coverage
    coverage = 90 if directive["reportingmetric"] == "AOT40c-5yr" else 85
    fraction = 10
    # Lead is compared with one decimal; everything else with none.
    comparingFraction = 1 if directive["pollutant"] == "Lead in PM10 (aerosol)" else 0
    meantype = MeanType(directive["mean_type"])
    with CursorFromPool() as cursor:
        meanvalues = Mean.Aggregate(cursor, meantype, tuple(regime["samplingpoints"]), fromtime, totime, agg_coverage, 3, fraction, True)
        if directive["reportingmetric"] == "AOT40c-5yr":
            meanvalues = convert_aot40(meanvalues)
        coverages_and_count_and_max = get_coverages_and_count_and_max(cursor, year, pd.DataFrame(meanvalues), limitvalue, comparingFraction, directive)
        if len(coverages_and_count_and_max) > 0:
            df_with_coverage = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= coverage)]
            # Keep series with enough coverage OR with exceedances regardless
            # of coverage.
            df_with_coverage_or_count = coverages_and_count_and_max[(coverages_and_count_and_max["coverage"] >= coverage) | (coverages_and_count_and_max["count"] > directive["count"])]
            if df_with_coverage_or_count.empty:
                return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
            regime["used_samplingpoints"] = list(df_with_coverage_or_count["sampling_point_id"].unique())
            use_count = directive["count"] is not None
            cnt = df_with_coverage_or_count["count"].max().item()
            mx = df_with_coverage["max_value"].max().item()
            has_exceedances = cnt > directive["count"] if use_count else mx > limitvalue
            value = cnt if use_count else mx
            return {"regime": regime, "value": value, "exceedance_type": exceedance_type, "has_exceedances": has_exceedances}
    return {"regime": regime, "value": 0, "exceedance_type": exceedance_type, "has_exceedances": False}
def get_coverages_and_count_and_max(cursor, year, df, limitvalue, factor, directive):
    """Join per-(sampling point, year) data coverage with exceedance counts and maxima.

    :param cursor: open DB cursor, forwarded to get_annual_coverage.
    :param year: evaluation year, forwarded to get_annual_coverage.
    :param df: aggregated mean values; expects datetime / sampling_point_id /
        value columns.
    :param limitvalue: threshold a rounded value must exceed to be counted.
    :param factor: number of decimals to round to before comparing.
    :param directive: selects which coverage computation applies.
    :return: DataFrame with sampling_point_id, year, coverage, count, max_value;
        for O3 daysAbove-3yr one collapsed row per sampling point.
    """
    # Guard: with no aggregated values there is nothing to evaluate, and the
    # column accesses below would raise. Callers only check len().
    if df.empty:
        return df
    df["year"] = df.datetime.str[:4].astype(int)
    spos = list(df["sampling_point_id"].unique())
    # Count values above the limit after rounding to `factor` decimals.
    counts = df.groupby(['sampling_point_id', 'year'])['value'].apply(lambda x: (round(x, factor) > limitvalue).sum()).reset_index(name='count')
    values = df.groupby(['sampling_point_id', 'year'])["value"].max().reset_index(name='max_value')
    # Pick the coverage definition matching the metric.
    if directive["pollutant"] == "O3" and directive["reportingmetric"] == "daysAbove-3yr":
        df = get_summer_winter_o3_coverage(pd.DataFrame(df))
    elif directive["reportingmetric"] == "AOT40c-5yr":
        df = get_pre_coverage(pd.DataFrame(df))
    else:
        df = get_annual_coverage(cursor, tuple(spos), year)
    merged_df = pd.merge(df, counts, on=['sampling_point_id', 'year'])
    merged_df = pd.merge(merged_df, values, on=['sampling_point_id', 'year'])
    # Collapse the three years into one row per sampling point: counts are
    # averaged, coverage is 100 only when the yearly maximum reaches 85.
    if directive["pollutant"] == "O3" and directive["reportingmetric"] == "daysAbove-3yr":
        merged_df = merged_df.groupby(["sampling_point_id"]).agg(
            {
                "year": lambda x: x.max(),
                "coverage": lambda x: 100 if x.max() >= 85 else 0,
                "count": lambda x: round(x.mean()),
                "max_value": lambda x: x.max(),
            }
        ).reset_index()
    return merged_df
def convert_aot40(meanvalues):
    """Collapse yearly AOT40 aggregates into a single record per series.

    The value becomes the mean of the yearly values, cnt the number of yearly
    records, datetime the latest year's timestamp, and coverage is 100 only
    when at least three of the years individually reach 85 % coverage.

    :param meanvalues: list of per-year aggregate dicts (as returned by
        Mean.Aggregate).
    :return: list of collapsed aggregate dicts, one per series.
    """
    series_key = ["sampling_point_id", "unit", "station", "component", "timestep", "lng", "lat", "meantype"]
    frame = pd.DataFrame(meanvalues)
    collapsed = frame.groupby(series_key).agg(
        value=("value", "mean"),
        cnt=("cnt", "count"),
        coverage=("coverage", lambda yearly: 100 if (yearly >= 85).sum() >= 3 else 0),
        datetime=("datetime", "max"),
    )
    return collapsed.reset_index().to_dict(orient="records")
from core.database import CursorFromPool
from core.data.mean import Mean, MeanType
from core.eea.generate_attainment.directives.lv import get_lv
from core.eea.generate_attainment.directives.tv import get_tv
from core.eea.generate_attainment.directives.cl import get_cl
from core.eea.generate_attainment.directives.int import get_int
from core.eea.generate_attainment.directives.alt import get_alt
from core.eea.generate_attainment.directives.eco import get_eco
from core.eea.generate_attainment.directives.ert import get_ert
from core.eea.generate_attainment.directives.lto import get_lto
from core.eea.generate_attainment.directives.limitvalues import limitvalues
import time
# NOTE(review): leftover profiling probe — `start` is only referenced by the
# commented-out timing print at the bottom of this module; consider removing.
start = time.time()
def unique_spos(assessmentregimes):
    """Return the unique sampling point ids across all regimes, in first-seen order.

    :param assessmentregimes: iterable of regime dicts whose "samplingpoints"
        entry is a list of ids or None.
    :return: list of unique sampling point ids, preserving first occurrence.
    """
    # A dict preserves insertion order and gives O(1) membership, replacing
    # the original O(n^2) `if spo not in list` scan.
    seen = {}
    for regime in assessmentregimes:
        if regime["samplingpoints"] is not None:
            for spo in regime["samplingpoints"]:
                seen.setdefault(spo, None)
    return list(seen)
def get_assessmentregimes(year):
    """Fetch every assessment regime for *year* with its sampling points.

    The CTE aggregates assessmentdata rows into one samplingpoints array per
    regime; regimes without data keep a NULL samplingpoints column via the
    left join.

    :param year: threshold classification year to filter on.
    :return: list of regime rows as returned by cursor.fetchall().
    """
    with CursorFromPool() as cursor:
        sql = """
            WITH spos as (
                select assessmentregime_id, array_agg(assessmentlocal_id) as samplingpoints
                from assessmentdata
                group by assessmentregime_id
            )
            SELECT ar.*, a.samplingpoints
            FROM assessmentregimes ar left join spos a on ar.id = a.assessmentregime_id
            WHERE ar.thresholdclassificationyear = %(year)s
        """
        cursor.execute(sql, {"year": year})
        assessmentregimes = cursor.fetchall()
    return assessmentregimes
def insert_attainments(attainments, deleteExistingAttainments):
    """Persist generated attainments and their exceedance descriptions.

    :param attainments: dicts with id / name / assessmentregime_id / comment
        and a nested "exceedance" dict for the exceedancedescriptions row.
    :param deleteExistingAttainments: when True, empty the attainments table
        first (presumably exceedancedescriptions is cleared through a cascade
        — TODO confirm against the schema).
    """
    with CursorFromPool() as cursor:
        if deleteExistingAttainments:
            cursor.execute("DELETE FROM attainments")
        for attainment in attainments:
            cursor.execute(
                """
                INSERT INTO attainments
                (id, name, assessmentregime_id, comment)
                VALUES
                (%(id)s, %(name)s, %(assessmentregime_id)s, %(comment)s)
                """,
                attainment)
            # NOTE: the column is spelled 'excedance_type' in the schema while
            # the bound parameter uses the correctly spelled dict key.
            cursor.execute(
                """
                INSERT INTO exceedancedescriptions
                (id, attainment_id, exceedancedescription_element, max_value, exceedances, excedance_type, adjustment_type, area_classification, exceedance_reason, population_reference_year, exposed_population, surface_area)
                VALUES
                (%(id)s, %(attainment_id)s, %(exceedancedescription_element)s, %(max_value)s, %(exceedances)s, %(exceedance_type)s, %(adjustment_type)s, %(area_classification)s, %(exceedance_reason)s, %(population_reference_year)s, %(exposed_population)s, %(surface_area)s)
                """,
                attainment["exceedance"])
def generate(year, deleteExistingAttainments):
    """Generate and persist attainments for every assessment regime of *year*.

    Each regime is matched against the static `limitvalues` table on
    pollutant, reporting metric and objective type; the matching directive is
    evaluated by the handler for its objective type and the result is stored
    as an attainment with one exceedance description.

    :param year: threshold classification year to generate for.
    :param deleteExistingAttainments: forwarded to insert_attainments; when
        True all existing attainment rows are deleted first.
    """
    # Dispatch table replaces the original chain of near-identical if-blocks
    # (which also used a local named `int`, shadowing the builtin).
    handlers = {
        "LV": get_lv,
        "TV": get_tv,
        "CL": get_cl,
        "INT": get_int,
        "ALT": get_alt,
        "ECO": get_eco,
        "ERT": get_ert,
        "LTO": get_lto,
    }
    assessmentregimes = get_assessmentregimes(year)
    # (Dropped the unused local `spos = unique_spos(assessmentregimes)`.)
    exceedances = []
    attainments = []
    for regime in assessmentregimes:
        directives = [
            d for d in limitvalues
            if d["pollutant_uri"] == regime["pollutant"]
            and d["reportingmetric"] == regime["reportingmetric"]
            and d["objectivetype"] == regime["objecttype"]
        ]
        if not directives:
            continue
        directive = directives[0]
        # Regimes without sampling points still get an empty, non-exceeding result.
        if regime["samplingpoints"] is None:
            exceedances.append({
                "regime": regime,
                "value": 0,
                "exceedance_type": 1 if directive["count"] is not None else 2,
                "has_exceedances": False,
            })
            continue
        handler = handlers.get(directive["objectivetype"])
        if handler is not None:
            result = handler(directive, regime, year)
            if result is not None:
                exceedances.append(result)
    for exceedance in exceedances:
        regime_id = exceedance["regime"]["id"]
        att_id = regime_id.replace("ARE", "ATT")
        # BUGFIX: exc_id was previously derived with .replace("ATT", "EXC")
        # from the regime id, which starts with "ARE" (not "ATT"), so the
        # replacement never fired and the exceedance kept the regime id.
        exc_id = regime_id.replace("ARE", "EXC")
        has_exc = exceedance["has_exceedances"]
        attainment_exceedance = {
            "id": exc_id,
            "attainment_id": att_id,
            "exceedancedescription_element": 3,
            "exceedance_type": exceedance["exceedance_type"],
            "max_value": exceedance["value"],
            "exceedances": has_exc,
            # The remaining fields are only reported when there is an exceedance.
            "adjustment_type": "noneApplicable" if has_exc else None,
            "area_classification": "http://dd.eionet.europa.eu/vocabulary/aq/areaclassification/rural" if has_exc else None,
            "exceedance_reason": "S1" if has_exc else None,
            "population_reference_year": year if has_exc else None,
            "exposed_population": 0 if has_exc else None,
            "surface_area": 0 if has_exc else None,
        }
        attainments.append({
            "id": att_id,
            "name": att_id,
            "assessmentregime_id": regime_id,
            "comment": None,
            "exceedance": attainment_exceedance,
        })
    insert_attainments(attainments, deleteExistingAttainments)
# print("Attainments:\t", len(exceedances))
# NOTE(review): leftover profiling probe paired with the module-level `start`
# assignment above; only used by the commented-out prints below.
end = time.time()
# print("Done in " + str(end - start) + " seconds")
# print("Attainments:\t\t\t", len(exceedances))
# Enum cheat-sheets kept for reference:
# None = 0, LimitValue = 1, MarginOfTolerance = 2, UpperAssessmentThreshold = 3, LowerAssessment Threshold = 4, TargetValue = 5, LongTermObjective = 6, ATF = 7, ITF = 8, CL=9, NorLimitValue = 98, AirqualityCriteria=99, ECO = 10, ERT = 11, INT = 12
# None = 0, Hour = 1, Day = 2, MovingEightHour = 3, Year = 4, MovingDay = 5, MovingEightHourMax = 6, Month = 7, WinterYear = 8, Aot40Vegetation = 9, Aot40ForestProtection = 10, WinterSeason = 11
......@@ -10,3 +10,11 @@ class AttainmentModel(BaseModel):
    def __getitem__(self, key):
        # Allow dict-style access (model["field"]) in addition to attribute access.
        return super().__getattribute__(key)
class GenerateModel(BaseModel):
    """Request payload for POST /api/management/attainments/generate."""

    year: int  # threshold classification year to generate attainments for
    deleteExistingAttainments: bool  # wipe existing attainments first when True

    def __getitem__(self, key):
        # Allow dict-style access (model["field"]) in addition to attribute access.
        return super().__getattribute__(key)
......@@ -2,9 +2,10 @@ from flask import jsonify, Blueprint, request
from flask_jwt_extended import jwt_required
from werkzeug.exceptions import BadRequest
from core.database import CursorFromPool
from endpoints.management.attainments.models import AttainmentModel
from endpoints.management.attainments.models import AttainmentModel, GenerateModel
from core.query import Q, DeleteModel
from core.jwt_ext_custom import jwt_required_with_management_claim, jwt_required_with_allnetworks_claim
from core.eea.generate_attainment.g import generate
attainments_endpoint = Blueprint('attainments', __name__)
......@@ -96,3 +97,12 @@ def attainments_delete():
if rows == 0:
raise BadRequest("Could not delete for ids " + {','.join(model.ids)})
return jsonify({"success": True})
@attainments_endpoint.route('/api/management/attainments/generate', methods=['POST'])
@jwt_required_with_management_claim()
@jwt_required_with_allnetworks_claim()
def attainments_generate():
    """Trigger attainment generation for the posted year (management-only)."""
    payload = GenerateModel(**request.json)
    generate(payload["year"], payload["deleteExistingAttainments"])
    return jsonify({"success": True})
......@@ -2,7 +2,7 @@
const props = defineProps({
show: Boolean,
title: String,
text: String,
text: String
});
const emit = defineEmits(["ok", "close"]);
......@@ -20,7 +20,7 @@ const ok = () => {
<div class="absolute right-0 top-0 bottom-0 w-full z-[998]" v-if="show">
<div class="absolute z-[999] px-6 py-4 bg-white border border-nord4 rounded shadow-xl left-1/2 top-1/4 flex flex-col min-w-40 max-w-72">
<div class="font-bold mb-1 text-lg">{{ title }}</div>
<div class="mb-4">{{ text }}</div>
<div class="mb-4" v-html="text"></div>
<div class="flex justify-end">
<button class="n-button outline outline-2 outline-nord14 mr-4" @click="ok">Ok</button>
<button class="n-button outline outline-2 outline-nord11" @click="close">Cancel</button>
......