Commit 2939bde6 authored by Jeff Piollé's avatar Jeff Piollé

docs updates

parent f879b53c
......@@ -287,7 +287,7 @@ class Field(newfield.Field):
self.array.attrs['units'] = units
@property
def datatype(self):
def dtype(self):
if self.mapper is None:
return self.array.dtype
else:
......@@ -621,7 +621,7 @@ class Field(newfield.Field):
values = partial(operator(field1, field2))
field = Field(variable,
dims=copy.copy(field1.dimensions),
datatype=field1.datatype,
datatype=field1.dtype,
fillvalue=field1.fillvalue,
values=values,
units=field1.units)
......@@ -807,7 +807,7 @@ def module(u, v, variable=None):
variable = Variable(varname)
field = Field(variable,
dims=copy.copy(u.dimensions),
datatype=u.datatype,
datatype=u.dtype,
fillvalue=u.fillvalue,
values=values,
units=u.units)
......
......@@ -16,7 +16,6 @@ from typing import (Any, Dict, Hashable, Iterable, Iterator, List,
import numpy
import xarray as xr
from cerbere.datamodel.variable import Variable
import cerbere.cfconvention as cf
__all__ = ['Field']
......@@ -198,8 +197,7 @@ class Field(object):
"""
return Field(data=data)
@property
def to_dataarray(self):
def to_dataarray(self) -> 'DataArray':
"""Return the field values a xarray DataArray"""
if self.dataset is None:
return self.array
......@@ -242,7 +240,8 @@ class Field(object):
return result
@property
def name(self):
def name(self) -> str:
"""Name of the field"""
return self.array.name
@name.setter
......@@ -250,15 +249,17 @@ class Field(object):
self.array.name = value
@property
def attrs(self):
def attrs(self) -> Mapping[str, Any]:
"""A dict of the field attributes"""
return self.array.attrs
@attrs.setter
def attrs(self, attrs):
def attrs(self, attrs: Mapping[str, Any]):
self.array.attrs = attrs
@property
def dims(self):
"""A tuple of the field dimensions"""
if self.dataset is None:
return tuple(self.array.dims)
else:
......@@ -272,7 +273,8 @@ class Field(object):
self.dataset.set_dimensions(dims)
@property
def dimnames(self):
def dimnames(self) -> Tuple[str]:
"""Tuple of the field's dimension names"""
return tuple(self.dims.keys())
def get_dimsize(self, dimname) -> int:
......@@ -283,47 +285,47 @@ class Field(object):
return self.dataset.get_dimsize(dimname)
@property
def fill_value(self):
"""return the value for missing data"""
def fill_value(self) -> Union[Any, None]:
"""The value for missing data in the field"""
try:
return self.array.encoding['_FillValue']
except KeyError:
return None
@fill_value.setter
def fill_value(self, fill_value):
def fill_value(self, fill_value: Any):
"""set the value for missing data"""
self.array.encoding['_FillValue'] = fill_value
@property
def valid_min(self):
"""return the minimum valid value"""
def valid_min(self) -> Union[Any, None]:
"""The minimum valid value in the field data"""
try:
return self.array.attrs['valid_min']
except KeyError:
return None
@valid_min.setter
def valid_min(self, value):
def valid_min(self, value: Any):
"""set the minimum valid value"""
self.array.attrs['valid_min'] = value
@property
def valid_max(self):
"""return the maximum valid value"""
def valid_max(self) -> Union[Any, None]:
"""The maximum valid value in the field data"""
try:
return self.array.attrs['valid_max']
except KeyError:
return None
@valid_max.setter
def valid_max(self, value):
def valid_max(self, value: Any):
"""set the maximum valid value"""
self.array.attrs['valid_max'] = value
@property
def units(self) -> str:
"""return the field units (``units`` CF attribute)"""
def units(self) -> Union[str, None]:
"""The field data units (``units`` CF attribute)"""
try:
return self.array.attrs['units']
except KeyError:
......@@ -331,12 +333,12 @@ class Field(object):
@units.setter
def units(self, units: str):
"""set the variable units (``units`` CF attribute)"""
"""Set the field data units (``units`` CF attribute)"""
self.array.attrs['units'] = units
@property
def description(self):
"""return the field description (``long_name`` CF attribute)"""
def description(self) -> Union[str, None]:
"""The field description (``long_name`` CF attribute)"""
try:
return self.array.attrs['long_name']
except KeyError:
......@@ -348,8 +350,8 @@ class Field(object):
self.array.attrs['long_name'] = description
@property
def standard_name(self) -> str:
"""return the field standard name (``standard_name`` CF attribute)"""
def standard_name(self) -> Union[str, None]:
"""The field standard name (``standard_name`` CF attribute)"""
try:
return (
self.array.attrs['standard_name'],
......@@ -372,36 +374,14 @@ class Field(object):
self.array.attrs['authority'] = None
@property
def datatype(self):
def dtype(self):
"""The type of the field data"""
if self.dataset is None:
return self.array.dtype
else:
return self.dataset.dataset[self.name].dtype
@property
def variable(self):
"""return the field variable definition"""
var = Variable(self.array.name)
if 'long_name' in self.array.attrs:
var.description = self.array.attrs['long_name']
if 'standard_name' in self.array.attrs:
var.standardname = self.array.attrs['standard_name']
try:
var.authority = self.array.attrs['authority']
except KeyError:
logging.error(
"No authority attribute defined for standard name: {}"
.format(var.standardname)
)
return var
@variable.setter
def variable(self, variable):
"""set the field variable definition"""
self.array.name = variable.shortname
self.array.attrs['long_name'] = variable.description
self.array.attrs['authority'] = variable.authority
self.array.attrs['standard_name'] = variable.standardname
def is_composite(self) -> bool:
"""
......@@ -646,7 +626,7 @@ class Field(object):
meanings,
index: Mapping[Hashable, Any]=None,
**kwargs):
"""helper function to get a boolean mask from a bit field.
"""helper function to test specific bits in a CF compliant mask
Bit (or flag) fields are arrays of integers where each bit has a
specific meaning, described in a ``flag_meaning`` field attribute.
......@@ -731,7 +711,7 @@ class Field(object):
field = Field(data=values,
name=varname,
dims=copy.copy(field1.dims),
datatype=field1.datatype,
datatype=field1.dtype,
fillvalue=field1.fill_value,
**kwargs)
return field
......@@ -833,7 +813,7 @@ def module(u, v, variable=None):
variable = Variable(varname)
field = Field(variable,
dims=copy.copy(u.dimensions),
datatype=u.datatype,
datatype=u.dtype,
fillvalue=u.fill_value,
values=values,
units=u.units)
......
......@@ -180,10 +180,10 @@ class Feature(Dataset):
)
return result
@classmethod
def get_model_name(cls):
"""Return the name of the datamodel"""
return cls.__name__
@property
def feature_type(self) -> str:
"""Return the type of the feature"""
return self.__class__.__name__
def has_coordinate(self, coord):
"""Return True if the coordinate variable is defined"""
......@@ -231,12 +231,13 @@ class Feature(Dataset):
:class:`shapely.geometry.Polygon`: the bounding box
'''
if 'bbox' not in self.dataset.attrs:
if 'bbox' not in self.attrs:
self.set_bbox()
return self._bbox
def get_wkt_bbox(self):
"""Return the bounding box in WKT format."""
@property
def wkt_bbox(self) -> str:
"""The bounding box in WKT format."""
return self._bbox.wkt
......
......@@ -66,6 +66,14 @@ class Grid(Feature):
:class:`cerbere.datamodel.field.Field` class objects, or
:class:`xarray.DataArray` class objects.
.. code-block:: python
from cerbere.feature.grid import Grid
from cerbere.dataset.ncdataset import NCDataset
ncf = NCDataset(url='SSS_SMOS_L3_Daily_0.5deg_CATDS_CECOS_2012.12.30_V02.nc')
g = Grid(ncf)
Args:
content (xarray, mapper or dict of fields): the content from which to
build the feature coordinates and data fields.
......
......@@ -56,7 +56,40 @@ Creating a field
.. autosummary::
:toctree: generated
dataset.field.Field
Field
Field.to_field
Attributes
----------
.. autosummary::
:toctree: generated
Field.name
Field.dims
Field.dimnames
Field.dtype
Field.description
Field.standard_name
Field.fill_value
Field.valid_min
Field.valid_max
Field.units
Field.attrs
Field contents
--------------
.. autosummary::
:toctree: generated
Field.clone
Field.rename
Field.bitmask_or
Field.get_values
Field.set_values
Field.to_dataarray
Field.compute
Field.module
Feature
=======
......@@ -68,8 +101,31 @@ Creating a feature
.. autosummary::
:toctree: generated
feature.Feature
trajectory.Trajectory
grid.Grid
swath.Swath
pointcollection.PointCollection
Attributes
----------
.. currentmodule:: cerbere.feature.feature
.. autosummary::
:toctree: generated
Feature.feature_type
Feature.geodims
Feature.geodimnames
Feature.geodimsizes
Feature.bbox
Feature.wkt_bbox
Feature contents
----------------
.. autosummary::
:toctree: generated
get_lat
get_lon
get_times
\ No newline at end of file
.. |Grid| replace:: :class:`~cerbere.feature.grid.Grid`
.. |GridTimeSeries| replace:: :class:`~cerbere.feature.gridtimeseries.GridTimeSeries`
.. |Trajectory| replace:: :class:`~cerbere.feature.trajectory.Trajectory`
.. |Swath| replace:: :class:`~cerbere.feature.swath.Swath`
.. |PointCollection| replace:: :class:`~cerbere.feature.pointcollection.PointCollection`
.. |NCDataset| replace:: :class:`~cerbere.dataset.ncdataset.NCDataset`
.. |GHRSSTNCDataset| replace:: :class:`~cerbere.dataset.ghrsstncdataset.GHRSSTNCDataset`
Datasets
========
**cerbere** mappers and data models have been tested with the following list of
products, ordered here by Dataset. To minimize the number of modules or
dependencies to install, mappers are grouped by contrib packages, so that users
only need to install the contribs for the Dataset classes they need.
**cerbere** :mod:`~cerbere.dataset` and :mod:`~cerbere.feature` classes have been
tested with the following list of products. To minimize the number of modules or
dependencies to install, :mod:`~cerbere.dataset` classes are grouped by contrib
packages, so that users only need to install the contribs for the
:mod:`~cerbere.dataset` classes they need.
Built-in Dataset classes
------------------------
......@@ -12,14 +22,13 @@ Built-in Dataset classes
These are the Dataset classes provided with the base cerbere package.
.. csv-table:: **cerbere** package
:header: "Dataset", "Compatible products"
:widths: 15, 40
:header: "Dataset", "Feature", "Compatible products"
"NCDataset", "Most CF compliant netCDF dataset (if correctly complying!)"
, "CATDS SMOS SSS CEC-OS products"
, "AVISO L2 (S)GDR"
, "Ifremer (Boyer) Mixed Layer Depth climatology"
"GHRSSTNCDataset", "GHRSST L2P, L3 and L4 datasets"
|NCDataset|, , "Most CF compliant netCDF dataset (if correctly complying!)"
, |Grid|, "CATDS SMOS SSS CEC-OS products"
, |Trajectory|, "AVISO L2 (S)GDR"
, |Grid|, "Ifremer (Boyer) Mixed Layer Depth climatology"
|GHRSSTNCDataset|, |Grid| or |Swath| or |GridTimeSeries|, "GHRSST L2P, L3 and L4 datasets"
Contribs
......@@ -29,6 +38,7 @@ These are the contrib packages providing additional more specialized Dataset
classes, grouped by thematic, data provider, format family,...
:mod:`cerberecontrib-s3` [`doc <http://cerberecontrib-s3.readthedocs.io>`_] [`git <https://git.cersat.fr/cerbere/cerberecontrib-s3>`_] : a collection of mappers for Eumetsat Sentinel-3 products
.. csv-table:: **cerberecontrib-s3** package
......@@ -38,6 +48,8 @@ classes, grouped by thematic, data provider, format family,...
"Dataset", "Product"
:mod:`cerberecontrib-cfosat`
.. csv-table:: **cerberecontrib-cfosat** package
:header: "Dataset", "Compatible products"
......@@ -45,51 +57,51 @@ classes, grouped by thematic, data provider, format family,...
"Dataset", "Product"
:mod:`cerberecontrib-scatterometer`
.. csv-table:: **cerberecontrib-scatterometer** package
:header: "Dataset", "Compatible products"
:widths: 15, 40
"Dataset", "Product"
:mod:`cerberecontrib-sar`
NCDataset
---------
Generic mapper for a large collection of CF compliant NetCDF datasets.
.. csv-table:: **cerberecontrib-sar** package
:header: "Dataset", "Compatible products"
:widths: 15, 40
Grid
++++
"Dataset", "Product"
* **SMOS SSS** by CATDS CEC-OS (N. Reul)
:mod:`cerberecontrib-ers`
.. code-block:: python
.. csv-table:: **cerberecontrib-ers** package
:header: "Dataset", "Compatible products"
:widths: 15, 40
from cerbere.datamodel.grid import Grid
from cerbere.mapper.ncfile import NCDataset
ncf = NCDataset(url='SSS_SMOS_L3_Daily_0.5deg_CATDS_CECOS_2012.12.30_V02.nc')
g = Grid()
g.load(ncf)
"Dataset", "Product"
:mod:`cerberecontrib-topex`
* **Mixed Layer Depth climatology** by Ifremer (Boyer)
* **WaveWatch3/Hindcast** model output by Ifremer
.. csv-table:: **cerberecontrib-topex** package
:header: "Dataset", "Compatible products"
:widths: 15, 40
Swath
+++++
"Dataset", "Product"
* **AVISO L2 GDR** products for CryoSat-2, Saral/AltiKa
:mod:`cerberecontrib-envisat`
.. csv-table:: **cerberecontrib-envisat** package
:header: "Dataset", "Compatible products"
:widths: 15, 40
"Dataset", "Product"
GHRSSTNCFile
------------
:mod:`cerberecontrib-bufr`
Generic mapper for GHRSST L2P, L3 and L4 datasets.
.. csv-table:: **cerberecontrib-bufr** package
:header: "Dataset", "Compatible products"
:widths: 15, 40
Swath
+++++
"Dataset", "Product"
* **VIIRS L2P** by NAVO
* **AMSR2 L2P** by JAXA
* **(A)ATSR L2P** by ESA SST CCI/ARC
......@@ -11,6 +11,9 @@ a typology of observation objects (called **features**).
.. _NumPy: http://www.numpy.org
.. _xarray: http://xarray.pydata.org
.. _conda: https://docs.conda.io/en/latest/
Contents
========
......@@ -21,7 +24,6 @@ Contents
installation
working_with_data
behind_the_scene
compatibility
Available dataset classes
=========================
......@@ -31,18 +33,13 @@ Available dataset classes
compatibility
* **Contribs**
* :mod:`safeslfile` [`doc <http://cerberecontrib-s3.readthedocs.io>`_] [`git <https://git.cersat.fr/cerbere/cerberecontrib-s3>`_] : a collection of mappers for Eumetsat Sentinel-3 products
Developer's corner
==================
.. toctree::
:maxdepth: 2
writing_mapper
writing_dataset
unittesting
......
......@@ -6,87 +6,22 @@ Installation
Requirements
============
* BLAS
* ATLAS
* Lapack
* GRIB API
* HDF5
* HDF4
* Netcdf4
* numpy
* xarray
* Python 3
* GDAL
* Python
* Python pip
* gfortran
* gcc
* g++
Install these with your package manager (if a "dev" version of the package is available, use it).
Your Python package manager should handle most of the dependencies by itself, but there are some gotchas:
- Numpy is required by the setup.py scripts of other modules, so it must be installed beforehand (see https://github.com/pypa/pip/issues/25)
.. sourcecode :: bash
pip install numpy==1.8.0
- The versioning convention of the pyhdf package changed from major.minor-revision to major.minor.revision. This change prevents package managers from handling pyhdf correctly, so you have to install it manually
.. sourcecode :: bash
cd /tmp
wget "http://downloads.sourceforge.net/project/pysclint/pyhdf/0.8.3/pyhdf-0.8.3.tar.gz"
tar xvzf pyhdf-0.8.3.tar.gz
cd pyhdf-0.8.3
# Change the following variables depending on your environment
export INCLUDE_DIRS=/usr/include/hdf
export LIBRARY_DIRS=/usr/lib
export NOSZIP=1
python setup.py install
.. note::
If the installation fails, check that the directory you specified in LIBRARY_DIRS contains libmfhdf.so and libdf.so.
They may have been renamed, preventing the linker from finding them.
For example, on Ubuntu 12.04, you can find these libraries as "libmfhdfalt.so" and "libdfalt.so".
To fix this, edit setup.py and set the correct names in the "libraries" variable (line 88).
On Ubuntu 12.04, you can use the install.sh script provided in the cerbere package to install dependencies.
Install these with your package manager. We recommend using conda_ for this.
Cerbere
=======
Just use your package manager to perform installation
Just use your package manager to perform installation. There is no conda
package for cerbere yet. Install it with pip:
.. sourcecode :: bash
pip install ./cerbere-0.1.0.tar.gz
.. note::
If pip complains about a missing C file, you should try to install / update
distribute:
.. sourcecode:: bash
pip install -U distribute
\ No newline at end of file
pip install ./cerbere-2.0.tar.gz
This diff is collapsed.
================
Writing a mapper
================
.. |dataset| replace:: :mod:`~cerbere.dataset`
This section describes how to write a mapper that will allow access to a new
format in read mode (see the complementary information at the end of this
section in case you also want to use this mapper to save data).
=============================
Writing a new |dataset| class
=============================
Creating a new mapper module
============================
Writing a mapper consists in writing a set of functions that help cerbere to
understand and access a file's content. It must implement the interface defined
by the :class:AbstractMapper class, and therefore inherit from this class.
This section describes how to write a |dataset| class that will allow access to
a new format in read mode (see the complementary information at the end of this
section in case you also want to use this |dataset| class to save data in a
specific format).
Create a new module with a basic structure as follow:
Creating a new |dataset| class
===============================
Writing a |dataset| class consists in writing a set of functions that help
cerbere to understand and access a file's content. It must implement the
interface defined by the :class:`~cerbere.dataset.dataset.Dataset` class, and
therefore inherit from this class.
Lets create a new |dataset| class called ``MyDataset``:
Create first a new module ``mydataset.py`` file with a basic structure as
follow:
.. code-block:: python
"""
.. module::cerbere.mapper.<your mapper module name>
Mapper class for <the format and/or product type handled by this mapper>
"""
Dataset class for <the format and/or product type handled by this class>
:license: Released under GPL v3 license, see :ref:`license`.
.. sectionauthor:: <your name>
.. codeauthor:: <your name>
"""
# import parent class
from cerbere.mapper import abstractmapper
class <Your mapper class name>(abstractmapper.AbstractMapper):
"""Mapper class to read <the format and/or product type handled by this mapper> files"""
def __init__(self, url=None, mode=abstractmapper.READ_ONLY, **kwargs):
"""Initialize a <the format and/or product type handled by this mapper> file mapper"""
super(<Your mapper class name>, self).__init__(url=url, mode=mode, **kwargs)
return
The following functions of the :class:AbstractMapper have to be overridden:
# import parent class
from cerbere.dataset.dataset import Dataset
class MyDataset(Dataset):
"""Dataset class to read <the format and/or product type handled by this
class> files
"""
def __init__(self, dataset, **kwargs):
"""Initialize a <the format and/or product type handled by this
class> file dataset
"""
return super(MyDataset, self).__init__(
dataset, **kwargs
)
The following functions of the :class:`~cerbere.dataset.dataset.Dataset` have
to be overridden, if necessary:
* __init__
* open()
* close
......
......@@ -14,7 +14,7 @@ import unittest
from cerbere.dataset.ncdataset import NCDataset
from cerbere.dataset.field import Field, Variable
from cerbere.dataset.field import Field
class TestField(unittest.TestCase):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment