Commit ba143996 · VERIFY / CIF

Authored Sep 13, 2021 by Antoine Berchet

Adding Empa NetCDF footprints

parent 0596f010

Changes: 4
pycif/plugins/models/flexpart/io/inputs/fluxes.py

...
@@ -3,7 +3,7 @@ from logging import debug
 import numpy as np
 import pandas as pd
-from .read import read_flexpart_grid
+from ...utils.read import read_flexpart_grid

 def flux_contribution(self, mode, subdir, dataobs,
...
pycif/plugins/models/flexpart/io/inputs/inicond.py

...
@@ -3,7 +3,7 @@ import numpy as np
 import pandas as pd
 from logging import debug
-from .read import read_flexpart_gridinit
+from ...utils.read import read_flexpart_gridinit

 def inicond_contribution(self, mode, subdir, dataobs,
...
pycif/plugins/models/flexpart/io/native2inputs_adj.py

...
@@ -7,7 +7,7 @@ import xarray as xr
 from logging import info, debug
 from ..utils.flexpart_header import read_header
-from pycif.plugins.models.flexpart.io.inputs.read import read_flexpart_grid, read_flexpart_gridinit
+from ..utils.read import read_flexpart_grid, read_flexpart_gridinit

 def native2inputs_adj(
...
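The three import updates above all follow the same relocation: read.py moves from the io/inputs package to a flexpart-level utils package (the rename is shown below). A minimal sketch of the equivalent absolute import after this commit, assuming the package layout implied by the changed paths:

    # Absolute form of the relative imports used above; the module path follows
    # the rename in this commit (io/inputs/read.py -> utils/read.py).
    from pycif.plugins.models.flexpart.utils.read import (
        read_flexpart_grid,
        read_flexpart_gridinit,
    )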
pycif/plugins/models/flexpart/io/inputs/read.py → pycif/plugins/models/flexpart/utils/read.py

...
@@ -8,6 +8,7 @@ import numpy as np
 from scipy.io import FortranFile
 import datetime
 from logging import info
+from netCDF4 import Dataset

 def read_flexpart_grid(subdir, file_name, fp_header, **kwargs):
...
@@ -27,12 +28,29 @@ def read_flexpart_grid(subdir, file_name, fp_header, **kwargs):
    # Numeric scaling factor
    numscale = np.float(kwargs.get("numscale", 1.E12))

    file_dates = os.path.join(subdir, 'dates')
    path_file = os.path.join(subdir, file_name)
    scaleconc = 1.e12

    if "nc" in path_file:
        grid_fp, ngrid, gtime_dt = \
            read_grid_nc(path_file, file_dates, fp_header)
    else:
        grid_fp, ngrid, gtime_dt = \
            read_grid(path_file, file_dates, fp_header, scaleconc)

    # Convert from ppt to ppmv or ppbv
    # Convert s.m3/kg to s.m2/kg and apply numerical scaling
    grid_fp = grid_fp / (fp_header.outheight[0] * numscale)

    return grid_fp, gtime_dt, ngrid


def read_grid(path_file, file_dates, fp_header, scaleconc):
    days = []
    times = []
    counts_i = []
...
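The hunk above routes NetCDF grids (any file path containing "nc") through the new read_grid_nc reader and keeps the FortranFile-based read_grid for the binary format. A minimal usage sketch, with hypothetical paths and a hypothetical fp_header object carrying the attributes the readers use (numx, numy, outheight):

    # Hypothetical call; the directory, file name and header object are placeholders.
    # A file name containing "nc" is dispatched to read_grid_nc, anything else to
    # the original FortranFile-based read_grid.
    grid_fp, gtime_dt, ngrid = read_flexpart_grid(
        subdir="/path/to/flexpart/run",            # hypothetical output directory
        file_name="grid_time_20210913000000.nc",   # ".nc" -> read_grid_nc branch
        fp_header=fp_header,                       # header with numx, numy, outheight
        numscale=1.e12,                            # default used above
    )
    # grid_fp is ordered (lon, lat, time) and is divided by
    # fp_header.outheight[0] * numscale before being returned.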
@@ -49,69 +67,74 @@ def read_flexpart_grid(subdir, file_name, fp_header, **kwargs):
            spi = f.read_ints('i4')
            r = f.read_ints('i4')
            spr = f.read_reals('f4')

            days.append(yyyymmdd)
            times.append(hhmmss)
            counts_i.extend(i)
            sparse_i.extend(spi)
            counts_r.extend(r)
            sparse_r.extend(spr)

        except TypeError:
            break

    gtime_dt = [datetime.datetime.strptime('{}{}'.format(d, h), '%Y%m%d%H%M%S')
                for d, h in zip(days, times)][::-1]
    ngrid = len(gtime_dt)

    grid_fp = np.zeros((fp_header.numx, fp_header.numy, ngrid + 2))

    if len(sparse_r) > 0:
        sign = np.sign(sparse_r)
        cum_counts = np.unique(np.cumsum(counts_r) % sign.size)
        signchange = ((np.roll(sign, 1) - sign) != 0)
        signchange[cum_counts] = True

        inds_out = np.zeros((len(sparse_r)), dtype=np.int)
        inds_out[signchange] = sparse_i

        mask = inds_out == 0
        idx = np.where(~mask, np.arange(mask.size), 0)
        np.maximum.accumulate(idx, out=idx)

        inds_out = inds_out[idx] + np.arange(mask.size) \
                   - idx - fp_header.numx * fp_header.numy

        jy, jx = np.unravel_index(inds_out, (fp_header.numy, fp_header.numx))

        jt = np.zeros((len(sparse_r)), dtype=np.int)
        jt[cum_counts] = np.arange(ngrid)[np.array(counts_r) > 0]
        np.maximum.accumulate(jt, out=jt)
        jt = ngrid - jt - 1

        grid_fp[jx, jy, jt] = np.abs(sparse_r) * scaleconc

    # grid_fp, ngrid, gtime = mod_flexpart.read_grid(
    #     path_file, file_dates, fp_header.numx, fp_header.numy,
    #     fp_header.maxngrid, fp_header.xshift, fp_header.ndgrid, fp_header.trajdays)

    # Convert from ppt to ppmv or ppbv
    return grid_fp, ngrid, gtime_dt

    # Convert s.m3/kg to s.m2/kg and apply numerical scaling
    grid_fp = grid_fp / (fp_header.outheight[0] * numscale)

    # # Convert grid times to datetime format
    # gtime_dt = []
    # for i in range(len(gtime)):
    #     # gtime_dt[i] = flexpart_header.Flexpartheader.j2d(gtime[i])
    #     if gtime[i] == 0.:
    #         break
    #
    #     # gtime_dt.append(fp_header.j2d(gtime[i]))
    #     gtime_dt.append(j2d(gtime[i]))

    return grid_fp, gtime_dt, ngrid


def read_grid_nc(path_file, file_dates, fp_header):
    with Dataset(path_file) as nc:
        # spec001 has dimension nageclass, numpoint, time, level, lat, lon
        # Assume that there is 1 numpoint/station per file
        shape = nc['spec001'].shape
        if shape[0] > 1 or shape[1] > 1:
            info('WARNING: There are more than 1 station in the flexpart file.')
        if shape[3] > 1:
            info('INFO: Select the bottom layer of the grid')

        grid = nc['spec001'][0, 0, :, 0, :, :]  # time, lat, lon
        # swap axes to get it to lon, lat, time
        grid_fp = np.swapaxes(grid, 0, -1)

        times = np.sort(nc['time'][:])
        enddate = datetime.strptime(nc.getncattr('iedate'), '%Y%m%d')
        gtime = []
        for t in times:
            gtime.append(enddate + datetime.timedelta(seconds=int(t)))

    grid_fp *= 1e12  # This is applied in mod_flexpart

    return grid_fp, len(gtime), gtime


def read_flexpart_gridinit(subdir, filename, fp_header, scaleconc=1, **kwargs):
    path_file = os.path.join(subdir, filename)

    with FortranFile(path_file, 'r') as f:
        yyyymmdd = f.read_ints('i4')[0].astype(str)
...
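In the NetCDF branch, read_grid_nc selects the first nageclass/numpoint, keeps the bottom level, and reorders spec001 from (time, lat, lon) to (lon, lat, time). A standalone sketch of that reordering with synthetic data (the array sizes are made up; only numpy is required):

    import numpy as np

    # Synthetic stand-in for nc['spec001'][0, 0, :, 0, :, :], ordered (time, lat, lon)
    ntimes, nlat, nlon = 8, 90, 120          # made-up sizes
    grid = np.random.rand(ntimes, nlat, nlon)

    # Same reordering as in read_grid_nc: swap the first and last axes
    grid_fp = np.swapaxes(grid, 0, -1)       # -> (lon, lat, time)
    assert grid_fp.shape == (nlon, nlat, ntimes)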
@@ -120,7 +143,7 @@ def read_flexpart_gridinit(subdir, filename, fp_header,
        sparse_i = f.read_ints('i4')
        sp_count_r = f.read_ints('i4')[0]
        sparse_r = f.read_reals('f4')

    nx = fp_header.numx
    ny = fp_header.numy
    nz = (fp_header.outheight != 0.).sum()
...
@@ -131,10 +154,10 @@ def read_flexpart_gridinit(subdir, filename, fp_header,
    cum_counts = np.unique(np.cumsum(sp_count_r) % sign.size)
    signchange = ((np.roll(sign, 1) - sign) != 0)
    signchange[cum_counts] = True

    inds_out = np.zeros((len(sparse_r)), dtype=np.int)
    inds_out[signchange] = sparse_i

    mask = inds_out == 0
    idx = np.where(~mask, np.arange(mask.size), 0)
    np.maximum.accumulate(idx, out=idx)
...
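Both read_grid and read_flexpart_gridinit expand the sparse FLEXPART records with the same pattern: the stored start indices are written at the sign-change positions and then forward-filled with np.maximum.accumulate, so every element of the sparse value array ends up with a grid index. A small self-contained illustration of that forward-fill step (the input values are made up):

    import numpy as np

    # Indices are only known at a few positions (here 0 and 4); zeros mean
    # "repeat the previous index", as in the hunks above.
    inds_out = np.array([7, 0, 0, 0, 3, 0])

    mask = inds_out == 0
    idx = np.where(~mask, np.arange(mask.size), 0)   # positions holding known indices
    np.maximum.accumulate(idx, out=idx)              # forward-fill those positions
    print(inds_out[idx])                             # [7 7 7 7 3 3]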
Jean Matthieu Haussaire (@jhaussaire) mentioned in issue #15 · Sep 17, 2021