fix(typos): Corrected many typos scattered throughout the code (#455)

develop
Mike Taves 2019-02-01 06:21:35 +13:00 committed by langevin-usgs
parent 82341ebe7c
commit 1b6a7d7089
111 changed files with 272 additions and 272 deletions

View File

@ -311,7 +311,7 @@ def stress_util2d(ml, nlay, nrow, ncol):
# util2d binary check
ml.lpf.vka[0].format.binary = True
# util3d cnstnt propogation test
# util3d cnstnt propagation test
ml.lpf.vka.cnstnt = 2.0
ml.write_input()

View File

@ -891,7 +891,7 @@ def test_wkt_parse():
wkttxt = wkttxt.replace("'", '"')
if len(wkttxt) > 0 and 'projcs' in wkttxt.lower():
crsobj = crs(esri_wkt=wkttxt)
geocs_params = ['wktstr', 'geogcs', 'datum', 'spheriod_name',
geocs_params = ['wktstr', 'geogcs', 'datum', 'spheroid_name',
'semi_major_axis', 'inverse_flattening',
'primem', 'gcs_unit']
for k in geocs_params:

View File

@ -56,7 +56,7 @@ def load_lak(mfnam, pth, run):
fn0 = os.path.join(lpth, mfnam)
# write free format files - wont run without resetting to free format - evt externa file issue
# write free format files - wont run without resetting to free format - evt external file issue
m.free_format_input = True
# rewrite files

View File

@ -109,7 +109,7 @@ def test_unitnums_load_and_write():
assert m.load_fail is False, 'failed to load all packages'
msg = 'modflow-2005 testsfr2_tab does not have ' + \
'1 layer, 7 rows, and 100 colummns'
'1 layer, 7 rows, and 100 columns'
v = (m.nlay, m.nrow, m.ncol, m.nper)
assert v == (1, 7, 100, 50), msg

View File

@ -110,7 +110,7 @@ def test_compare2zonebudget(rtol=1e-2):
return
# def test_comare2mflist_mlt(rtol=1e-2):
# def test_compare2mflist_mlt(rtol=1e-2):
#
# loadpth = os.path.join('..', 'examples', 'data', 'zonbud_examples', 'freyberg_mlt')
#
@ -233,7 +233,7 @@ def test_get_model_shape():
if __name__ == '__main__':
# test_comare2mflist_mlt()
# test_compare2mflist_mlt()
test_compare2zonebudget()
test_zonbud_aliases()
test_zonbud_to_csv()

View File

@ -1,5 +1,5 @@
"""
Test postprocessing utilties
Test postprocessing utilities
"""
import sys
@ -97,4 +97,4 @@ def test_get_sat_thickness_gradients():
if __name__ == '__main__':
#test_get_transmissivities()
#test_get_water_table()
test_get_sat_thickness_gradients()
test_get_sat_thickness_gradients()

View File

@ -105,7 +105,7 @@ def test_facenode_is3():
verticaldivisions4=0,
horizontaldivisions4=0,
rowdivisions5=0,
columndivisons5=0,
columndivisions5=0,
rowdivisions6=3,
columndivisions6=3)
p = flopy.modpath.NodeParticleData(subdivisiondata=sd, nodes=locs)
@ -139,7 +139,7 @@ def test_facenode_is3a():
verticaldivisions4=0,
horizontaldivisions4=0,
rowdivisions5=0,
columndivisons5=0,
columndivisions5=0,
rowdivisions6=3,
columndivisions6=3)
p = flopy.modpath.NodeParticleData(subdivisiondata=[sd, sd],
@ -166,7 +166,7 @@ def test_facenode_is2a():
verticaldivisions4=0,
horizontaldivisions4=0,
rowdivisions5=0,
columndivisons5=0,
columndivisions5=0,
rowdivisions6=3,
columndivisions6=3)
p = flopy.modpath.LRCParticleData(subdivisiondata=[sd, sd],

View File

@ -48,7 +48,7 @@ def test_lkt_with_multispecies():
## Instantiate solver package for MODFLOW-NWT
# Newton-Rhapson Solver: Create a flopy nwt package object
# Newton-Raphson Solver: Create a flopy nwt package object
headtol = 1.0E-4
fluxtol = 5

View File

@ -76,7 +76,7 @@ def test_gridgen():
g.build()
# test the different gridprops dictionaries, which contain all the
# information needed to make the different discretiztion packages
# information needed to make the different discretization packages
gridprops = g.get_gridprops_disv()
gridprops = g.get_gridprops()
#gridprops = g.get_gridprops_disu6()

View File

@ -832,7 +832,7 @@ def test005_advgw_tidal():
time_series_namerecord='rch_3',
interpolation_methodrecord='linear')
# charnge folder to save simulation
# change folder to save simulation
sim.simulation_data.mfpath.set_sim_path(run_folder)
# write simulation to new location
@ -1035,7 +1035,7 @@ def test035_fhb():
time_series_namerecord='head',
interpolation_methodrecord='linearend')
# charnge folder to save simulation
# change folder to save simulation
sim.simulation_data.mfpath.set_sim_path(run_folder)
# write simulation to new location
@ -1158,7 +1158,7 @@ def test006_gwf3_disv():
numgnc=24, numalphaj=1,
gncdata=gncrecarray)
# charnge folder to save simulation
# change folder to save simulation
sim.simulation_data.mfpath.set_sim_path(run_folder)
# write simulation to new location

View File

@ -6,7 +6,7 @@ Frequently Asked Questions
#### Working with existing models
+ [How do I load an existing MODFLOW-2005 model](../examples/FAQ/load_existing_mf2005.ipynb)
+ [How do I load an existing MODFLOW model with auxillary stress package data](../examples/FAQ/load_existing_model_wAUX.ipynb)
+ [How do I load an existing MODFLOW model with auxiliary stress package data](../examples/FAQ/load_existing_model_wAUX.ipynb)
#### Running a MODFLOW model

View File

@ -97,7 +97,7 @@ Instructions for making a FloPy release
from a terminal.
3. Pull upsteam [flopy-feedstock](https://github.com/conda-forge/flopy-feedstock) into local copy of the [flopy-feedstock fork](https://github.com/jdhughes-usgs/flopy-feedstock) repo:
3. Pull upstream [flopy-feedstock](https://github.com/conda-forge/flopy-feedstock) into local copy of the [flopy-feedstock fork](https://github.com/jdhughes-usgs/flopy-feedstock) repo:
```
cd /Users/jdhughes/Documents/Development/flopy-feedstock_git

View File

@ -36,7 +36,7 @@ MFData --* MFArray --* MFTransientArray
MFData --* MFList --* MFTransientList
MFData --* MFScalar --* MFTranientScalar
MFData --* MFScalar --* MFTransientScalar
MFTransientData --* MFTransientArray, MFTransientList, MFTransientScalar
@ -45,4 +45,4 @@ Figure 2: FPMF6 package and data classes. Lines connecting classes show a rela
There are three main types of data, MFList, MFArray, and MFScalar data. All three of these data types are derived from the MFData abstract base class. MFList data is the type of data stored in a spreadsheet with different column headings. For example, the data describing a flow barrier are of type MFList. MFList data is stored in numpy recarrays. MFArray data is data of a single type (eg. all integer values). For example, the model's HK values are of type MFArray. MFArrays are stored in numpy ndarrays. MFScalar data is a single data item. Most MFScalar data are options. All MFData subclasses contain an MFDataStructure object that defines the expected structure and types of the data.
Transient data, or data defined for each stress period (eg. data in the period blocks) is stored in MFTransientArray, MFTransientList, and MFTransientScalar. These classes are sub-classes of MFArray, MFList, and MFScalar, respectively. These classes are also subclasses of MFTransientData.
Transient data, or data defined for each stress period (eg. data in the period blocks) is stored in MFTransientArray, MFTransientList, and MFTransientScalar. These classes are sub-classes of MFArray, MFList, and MFScalar, respectively. These classes are also subclasses of MFTransientData.

View File

@ -78,7 +78,7 @@ The following jupyter Notebooks contain examples for using FloPy pre- and post-p
* apportion boundary fluxes (e.g. from an analytic element model) among model layers based on transmissivity.
* any other analysis where a distribution of transmissivity is needed for a specified vertical interval of the model.
+ An overview of utilties for [post-processing head results from MODFLOW](../examples/Notebooks/flopy3_Modflow_postprocessing_example.ipynb).
+ An overview of utilities for [post-processing head results from MODFLOW](../examples/Notebooks/flopy3_Modflow_postprocessing_example.ipynb).
#### ***Export examples***

View File

@ -23,7 +23,7 @@ FloPy Supported Packages
| Layer Property Flow (LPF) | Supported | Supported | Supported |
| Link-AMG (LMG) | Not supported | Not supported | Not supported |
| MODFLOW Link-MT3DMS (LMT) | Supported | Supported | Not supported |
| Multipler (MULT) | Not supported | Supported | Not supported |
| Multiplier (MULT) | Not supported | Supported | Not supported |
| Multi-Node Well 1 (MNW1) | Supported | Supported | Not supported |
| Multi-Node Well 2 (MNW2) | Supported | Supported | Not supported |
| Multi-Node Well Information (MNWI) | Supported | Supported | Not supported |
@ -44,7 +44,7 @@ FloPy Supported Packages
| Subsidence (SUB) | Supported | Supported | Not supported |
| Subsidence and Aquifer-System <br />Compaction (SWT) | Supported | Supported | Not supported |
| Upstream Weighted (UPW) | Supported | Supported | Not supported |
| Unzaturated Zone Flow (UZF) | Supported | Supported | Not supported |
| Unsaturated Zone Flow (UZF) | Supported | Supported | Not supported |
| Well (WEL) | Supported | Supported | Not supported |
| Zone (ZONE) | Not supported | Supported | Not supported |

View File

@ -40,7 +40,7 @@ FloPy Changes
* Fixed bug in `mfsfr.py` when writing kinematic data (`irtflg >0`).
* Fixed issue from change in MODFLOW 6 `inspect.getargspec()` method (for getting method arguments).
* Fixed MODFLOW 6 BINARY keyword for reading binary data from a file using `OPEN/CLOSE` (needs parentheses around it).
* Fixed bug in `mtlkt.py` when instatiating, loading, and/or writing lkt input file related to multi-species problems.
* Fixed bug in `mtlkt.py` when instantiating, loading, and/or writing lkt input file related to multi-species problems.
### Version 3.2.9
@ -86,11 +86,11 @@ FloPy Changes
* Added support for FORTRAN free format array data using n*value where n is the number of times value is repeated.
* Added support for comma separators in 1D data in LPF and UPF files
* Added support for comma separators on non array data lines in DIS, BCF, LPF, UPW, HFB, and RCH Packages.
* Added `.reset_budgetunit()` method to OC package to faciltate saving cell-by-cell binary output to a single file for all packages that can save cell-by-cell output.
* Added `.reset_budgetunit()` method to OC package to facilitate saving cell-by-cell binary output to a single file for all packages that can save cell-by-cell output.
* Added a `.get_residual()` method to the `CellBudgetFile` class.
* Added support for binary stress period files (`OPEN/CLOSE filename (BINARY)`) in `wel` stress packages on load and instantiation. Will extend to other list-based MODFLOW stress packages.
* Added a new `flopy.utils.HeadUFile` Class (located in binaryfile.py) for reading unstructured head files from MODFLOW-USG. The `.get_data()` method for this class returns a list of one-dimensional head arrays for each layer.
* Added metadata.acdd class to fetch model metadata from ScienceBase.gov and manage CF/ACDD-complient metadata for NetCDF export
* Added metadata.acdd class to fetch model metadata from ScienceBase.gov and manage CF/ACDD-compliant metadata for NetCDF export
* Added sparse export option for boundary condition stress period data, where only cells for that B.C. are exported (for example, `package.stress_period_data.export('stuff.shp', sparse=True)`)
* Added additional SFR2 package functionality:
* `.export_linkages()` and `.export_outlets()` methods to export routing linkages and outlets
@ -177,7 +177,7 @@ FloPy Changes
* Added support for LAK and GAGE packages - full load and write functionality supported.
* Added support for MNW2 package. Load and write of .mnw2 package files supported. Support for .mnwi, or the results files (.qsu, .byn) not yet implemented.
* Improved support for changing the output format of arrays and variables written to MODFLOW input files.
* Restructued SEAWAT support so that packages can be added directly to the SEAWAT model, in addition to the approach of adding a modflow model and a mt3d model. Can now load a SEAWAT model.
* Restructured SEAWAT support so that packages can be added directly to the SEAWAT model, in addition to the approach of adding a modflow model and a mt3d model. Can now load a SEAWAT model.
* Added load support for MT3DMS Reactions package
* Added multi-species support for MT3DMS Reactions package
* Added static method to Mt3dms().load_mas that reads an MT3D mass file and returns a recarray

View File

@ -35,7 +35,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"**Load an existing MODFLOW-2005 model with auxillary variables**"
"**Load an existing MODFLOW-2005 model with auxiliary variables**"
]
},
{
@ -53,7 +53,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"**Auxillary IFACE data are in the river package**"
"**Auxiliary IFACE data are in the river package**"
]
},
{
@ -69,7 +69,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"You can confirm that the `iface` auxillary data have been read by looking at the `dtype`."
"You can confirm that the `iface` auxiliary data have been read by looking at the `dtype`."
]
},
{

View File

@ -119,7 +119,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"After you download/compile the executable put it in the current working directory or one of the directorys in your system path."
"After you download/compile the executable put it in the current working directory or one of the directories in your system path."
]
},
{

View File

@ -146,7 +146,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Newton-Rhapson Solver: Create a flopy nwt package object\n",
"# Newton-Raphson Solver: Create a flopy nwt package object\n",
"\n",
"headtol = 1.0E-4 \n",
"fluxtol = 5 \n",

View File

@ -477,7 +477,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"The plot_bc() method can be used to plot a cross-section of boundary conditions. Just like the plot_bc() method for ModelMap, the default bounday conditions can be changed in the method call.\n",
"The plot_bc() method can be used to plot a cross-section of boundary conditions. Just like the plot_bc() method for ModelMap, the default boundary conditions can be changed in the method call.\n",
"\n",
"Here, we plot the location of well cells in column 6."
]

View File

@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## This example shows some utilties for post-processing head results from MODFLOW"
"## This example shows some utilities for post-processing head results from MODFLOW"
]
},
{
@ -172,7 +172,7 @@
" ctr = axes[i].contour(hdslayer, colors='k', linewidths=0.5)\n",
" \n",
" # export head rasters \n",
" # (GeoTiff export requires the rasterio package; for ascii grids, just change the extention to *.asc)\n",
" # (GeoTiff export requires the rasterio package; for ascii grids, just change the extension to *.asc)\n",
" m.sr.export_array('data/heads{}.tif'.format(i+1), hdslayer)\n",
" \n",
" # export head contours to a shapefile\n",

View File

@ -702,7 +702,7 @@
" verticaldivisions2=10, horizontaldivisions2=10,\n",
" verticaldivisions3=10, horizontaldivisions3=10,\n",
" verticaldivisions4=10, horizontaldivisions4=10,\n",
" rowdivisions5=0, columndivisons5=0,\n",
" rowdivisions5=0, columndivisions5=0,\n",
" rowdivisions6=4, columndivisions6=4)\n",
"pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew)\n",
"# create forward particle group\n",

View File

@ -171,7 +171,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"The span variable will also accept 'layers', in which the parameter applies to the list of layers, as shown next. When 'layers' is specifed in the span dictionary, then the original hk value of 10. remains in the array, and the multiplier is specified on the array control line."
"The span variable will also accept 'layers', in which the parameter applies to the list of layers, as shown next. When 'layers' is specified in the span dictionary, then the original hk value of 10. remains in the array, and the multiplier is specified on the array control line."
]
},
{

View File

@ -21,7 +21,7 @@
" * MODFLOW Groundwater Flow Model Class. Represents a single model in a simulation.\n",
"\n",
"flopy.mf6.Modflow[pc]\n",
" * MODFLOW package classes where [pc] is the abbreviation of the package name. Each package is a seperate class. \n",
" * MODFLOW package classes where [pc] is the abbreviation of the package name. Each package is a separate class. \n",
"\n",
"For packages that are part of a groundwater flow model, the abbreviation begins with \"Gwf\". For example, \"flopy.mf6.ModflowGwfdis\" is the Discretization package.\n",
" "
@ -330,7 +330,7 @@
"\n",
"* maxbound - The number of rows in the recarray. If not specified one row is returned.\n",
"\n",
"* aux_vars - List of auxillary variable names. If not specified auxillary variables are not used.\n",
"* aux_vars - List of auxiliary variable names. If not specified auxiliary variables are not used.\n",
"\n",
"* boundnames - True/False if boundnames is to be used.\n",
"\n",
@ -427,7 +427,7 @@
"source": [
"### Specifying MFList Data in an External File \n",
"\n",
"MFList data can be specified in an exernal file using a dictionary with the 'filename' key. If the 'data' key is also included in the dictionary and is not None, flopy will create the file with the data contained in the 'data' key. The code below creates a chd package which creates and references an external file containing data for stress period 1 and stores the data internally in the chd package file for stress period 2. "
"MFList data can be specified in an external file using a dictionary with the 'filename' key. If the 'data' key is also included in the dictionary and is not None, flopy will create the file with the data contained in the 'data' key. The code below creates a chd package which creates and references an external file containing data for stress period 1 and stores the data internally in the chd package file for stress period 2. "
]
},
{

View File

@ -209,7 +209,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"#### convert the DataFrame to a rec array for compatibility with flopy"
"#### convert the DataFrame to a recarray for compatibility with flopy"
]
},
{

View File

@ -6,9 +6,9 @@
"source": [
"# Flopy MODFLOW Boundary Conditions\n",
"\n",
"Flopy has a new way to enter boundary conditions for some MODFLOW packages. These changes are substantial. Boundary condtions can now be entered as a list of boundaries, as a numpy recarray, or as a dictionary. These different styles are described in this notebook.\n",
"Flopy has a new way to enter boundary conditions for some MODFLOW packages. These changes are substantial. Boundary conditions can now be entered as a list of boundaries, as a numpy recarray, or as a dictionary. These different styles are described in this notebook.\n",
"\n",
"Flopy also now requires zero-based input. This means that **all boundaries are entered in zero-based layer, row, and column indicies**. This means that older Flopy scripts will need to be modified to account for this change. If you are familiar with Python, this should be natural, but if not, then it may take some time to get used to zero-based numbering. Flopy users submit all information in zero-based form, and Flopy converts this to the one-based form required by MODFLOW.\n",
"Flopy also now requires zero-based input. This means that **all boundaries are entered in zero-based layer, row, and column indices**. This means that older Flopy scripts will need to be modified to account for this change. If you are familiar with Python, this should be natural, but if not, then it may take some time to get used to zero-based numbering. Flopy users submit all information in zero-based form, and Flopy converts this to the one-based form required by MODFLOW.\n",
"\n",
"The following MODFLOW packages are affected by this change:\n",
"\n",

View File

@ -148,7 +148,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Newton-Rhapson Solver: Create a flopy nwt package object\n",
"# Newton-Raphson Solver: Create a flopy nwt package object\n",
"\n",
"headtol = 1.0E-4 \n",
"fluxtol = 5 \n",
@ -301,9 +301,9 @@
"elev_slp = (308.82281 - 298.83649) / (ncol - 1)\n",
"\n",
"sp = []\n",
"for k in [0,1,2]: # These indicies need to be adjusted for 0-based moronicism\n",
" for i in [0,299]: # These indicies need to be adjusted for 0-based silliness\n",
" for j in np.arange(0,300,1): # These indicies need to be adjusted for 0-based foolishness\n",
"for k in [0,1,2]: # These indices need to be adjusted for 0-based moronicism\n",
" for i in [0,299]: # These indices need to be adjusted for 0-based silliness\n",
" for j in np.arange(0,300,1): # These indices need to be adjusted for 0-based foolishness\n",
" # Skipping cells not satisfying the conditions below\n",
" if ((i == 1 and (j < 27 or j > 31)) or (i==299 and (j < 26 or j > 31))):\n",
" if (i % 2 == 0):\n",

View File

@ -136,7 +136,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"The Triange class creates a .node and a .poly file as input for the Triangle program. The Triangle class then reads four output files from the Triangle program into numpy structured arrays. These four structured arrays are stored with the object as follows."
"The Triangle class creates a .node and a .poly file as input for the Triangle program. The Triangle class then reads four output files from the Triangle program into numpy structured arrays. These four structured arrays are stored with the object as follows."
]
},
{

View File

@ -19,7 +19,7 @@ Well model will use SKIN !! Other options are Linear and NonLinear:2.00 --Ex
2 15 9 0 -1
2 13 7 0 -1 SITE:Simple-C
1 18 4 0 -1.
# Multi-node switch Switch to specify Hlim Auxillary
# Multi-node switch Switch to specify Hlim Auxiliary
# | as difference from Href definitions
# | |
# lay row col Q | Conc rw Skin | Hlim Href QWZN |

View File

@ -2,7 +2,7 @@
# EXAMPLE INPUT DATA FILE FOR THE LINK-MT3DMS (LMT7) PACKAGE
#
# The capital letters are keywords that must not be altered except that
# thet can appear in either uppercase or lowercase.
# they can appear in either uppercase or lowercase.
#
# Assign any valid file name for the flow-transport link file
# to be produced by the LMT Package. Leave the input blank to use the

View File

@ -2,7 +2,7 @@
# EXAMPLE INPUT DATA FILE FOR THE LINK-MT3DMS (LMT6) PACKAGE
#
# The capital letters are keywords that must not be altered except that
# thet can appear in either uppercase or lowercase.
# they can appear in either uppercase or lowercase.
#
# Assign any valid file name for the flow-transport link file
# to be produced by the LMT Package. Leave the input blank to use the

View File

@ -2,7 +2,7 @@
# EXAMPLE INPUT DATA FILE FOR THE LINK-MT3DMS (LMT6) PACKAGE
#
# The capital letters are keywords that must not be altered except that
# thet can appear in either uppercase or lowercase.
# they can appear in either uppercase or lowercase.
#
# Assign any valid file name for the flow-transport link file
# to be produced by the LMT Package. Leave the input blank to use the

View File

@ -2,7 +2,7 @@
# EXAMPLE INPUT DATA FILE FOR THE LINK-MT3DMS (LMT6) PACKAGE
#
# The capital letters are keywords that must not be altered except that
# thet can appear in either uppercase or lowercase.
# they can appear in either uppercase or lowercase.
#
# Assign any valid file name for the flow-transport link file
# to be produced by the LMT Package. Leave the input blank to use the

View File

@ -2,7 +2,7 @@
# EXAMPLE INPUT DATA FILE FOR THE LINK-MT3DMS (LMT6) PACKAGE
#
# The capital letters are keywords that must not be altered except that
# thet can appear in either uppercase or lowercase.
# they can appear in either uppercase or lowercase.
#
# Assign any valid file name for the flow-transport link file
# to be produced by the LMT Package. Leave the input blank to use the

View File

@ -2,7 +2,7 @@
# EXAMPLE INPUT DATA FILE FOR THE LINK-MT3DMS (LMT6) PACKAGE
#
# The capital letters are keywords that must not be altered except that
# thet can appear in either uppercase or lowercase.
# they can appear in either uppercase or lowercase.
#
# Assign any valid file name for the flow-transport link file
# to be produced by the LMT Package. Leave the input blank to use the

View File

@ -2,7 +2,7 @@
# EXAMPLE INPUT DATA FILE FOR THE LINK-MT3DMS (LMT6) PACKAGE
#
# The capital letters are keywords that must not be altered except that
# thet can appear in either uppercase or lowercase.
# they can appear in either uppercase or lowercase.
#
# Assign any valid file name for the flow-transport link file
# to be produced by the LMT Package. Leave the input blank to use the

View File

@ -2,7 +2,7 @@
# EXAMPLE INPUT DATA FILE FOR THE LINK-MT3DMS (LMT6) PACKAGE
#
# The capital letters are keywords that must not be altered except that
# thet can appear in either uppercase or lowercase.
# they can appear in either uppercase or lowercase.
#
# Assign any valid file name for the flow-transport link file
# to be produced by the LMT Package. Leave the input blank to use the

View File

@ -1,4 +1,4 @@
MT3DMS Benchmark Test Porblem #2
MT3DMS Benchmark Test Problem #2
1D advection-dispersion with nonequilibrium sorption
1 1 101 3 1 1
SEC CM G

View File

@ -32,7 +32,7 @@
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-----
| M T | MT3DMS Benchmark Test Porblem #2
| M T | MT3DMS Benchmark Test Problem #2
| 3 D | 1D advection-dispersion with nonequilibrium sorption
-----
THE TRANSPORT MODEL CONSISTS OF 1 LAYER(S) 1 ROW(S) 101 COLUMN(S)

View File

@ -2,6 +2,6 @@
0 10. |AL-LAYER3
0 10. |AL-LAYER3
0 10. |AL-LAYER4
0 .20 |TRPT, ENTERED AS A UNIROM VALUE
0 .20 |TRPT, ENTERED AS A UNIFORM VALUE
0 .20 |TRPV
0 0. |DMCOEF

View File

@ -125,7 +125,7 @@
" PROJ4 string that defines the xul-yul coordinate system\n",
" (.e.g. '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ').\n",
" Can be an EPSG code (e.g. 'EPSG:4326'). Default is 'EPSG:4326'\n",
" start_dateteim : str\n",
" start_datetime : str\n",
" starting datetime of the simulation. default is '1/1/1970'\n",
"\n",
" Attributes\n",

View File

@ -287,7 +287,7 @@ def doit():
#close summary file
fs.close()
# clean up working directorys
# clean up working directories
for idx in range(nproc):
filelist = [f for f in os.listdir(cf_pths[idx])]
for f in filelist:

View File

@ -626,7 +626,7 @@ class NetCdf(object):
proj4_str = "+init=" + proj4_str
self.log("building grid crs using proj4 string: {0}".format(proj4_str))
try:
self.grid_crs = Proj(proj4_str, preseve_units=True, errcheck=True)
self.grid_crs = Proj(proj4_str, preserve_units=True, errcheck=True)
except Exception as e:
self.log("error building grid crs:\n{0}".format(str(e)))

View File

@ -325,7 +325,7 @@ def output_helper(f, ml, oudic, **kwargs):
mask_array3d=mask_array3d)
else:
estr = "unrecognized file extention:{0}".format(filename)
estr = "unrecognized file extension:{0}".format(filename)
if logger:
logger.lraise(estr)
else:

View File

@ -965,7 +965,7 @@ class BaseModel(object):
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print('\nReseting free_format_input to True to ' +
print('\nResetting free_format_input to True to ' +
'preserve the precision of the parameter data.')
self.free_format_input = True
@ -980,7 +980,7 @@ class BaseModel(object):
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check arguemnt,
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
@ -1312,7 +1312,7 @@ def run_model(exe_name, namefile, model_ws='./',
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
use_async : boolean
asynchonously read model stdout and report with timestamps. good for
asynchronously read model stdout and report with timestamps. good for
models that take long time to run. not good for models that run
really fast
cargs : str or list of strings

View File

@ -299,8 +299,8 @@ class ModelGrid(object):
simulation_data : object
contains all simulation related data
grid_type : enumeration
type of model grid (DiscritizationType.DIS, DiscritizationType.DISV,
DiscritizationType.DISU)
type of model grid (DiscretizationType.DIS, DiscretizationType.DISV,
DiscretizationType.DISU)
Methods
----------
@ -310,7 +310,7 @@ class ModelGrid(object):
returns True if the grid type is consistent with the current
simulation data
grid_connections_array : ()
for DiscritizationType.DISU grids, returns an array containing the
for DiscretizationType.DISU grids, returns an array containing the
number of connections of it cell
get_horizontal_cross_section_dim_arrays : ()
returns a list of numpy ndarrays sized to the horizontal cross section
@ -327,24 +327,24 @@ class ModelGrid(object):
returns a numpy ndarray sized to a model layer
get_horizontal_cross_section_dim_names : ()
returns the appropriate dimension axis for a horizontal cross section
based on the model discritization type
based on the model discretization type
get_model_dim_names : ()
returns the names of the model dimensions based on the model
discritization type
discretization type
get_num_spatial_coordinates : ()
returns the number of spatial coordinates based on the model
discritization type
discretization type
num_rows
returns the number of model rows. model discritization type must be
returns the number of model rows. model discretization type must be
DIS
num_columns
returns the number of model columns. model discritization type must
returns the number of model columns. model discretization type must
be DIS
num_connections
returns the number of model connections. model discritization type
returns the number of model connections. model discretization type
must be DIS
num_cells_per_layer
returns the number of cells per model layer. model discritization
returns the number of cells per model layer. model discretization
type must be DIS or DISV
num_layers
returns the number of layers in the model
@ -353,7 +353,7 @@ class ModelGrid(object):
get_all_model_cells
returns a list of all model cells, represented as a layer/row/column
tuple, a layer/cellid tuple, or a cellid for the DIS, DISV, and DISU
discritizations, respectively
discretizations, respectively
See Also
--------
@ -385,7 +385,7 @@ class ModelGrid(object):
name of a model in the simulation
Returns
-------
grid type : DiscritizationType
grid type : DiscretizationType
"""
package_recarray = simulation_data.mfdata[
(model_name, 'nam', 'packages', 'packages')]
@ -513,7 +513,7 @@ class ModelGrid(object):
def change_grid_spacing(self, spacing_factor):
self.test = 1
def change_discritization_type(self, new_dis_type):
def change_discretization_type(self, new_dis_type):
self.test = 1
def num_rows(self):
@ -555,7 +555,7 @@ class ModelGrid(object):
(self._model_name, 'disv', 'dimensions', 'ncpl')].get_data()
elif self.grid_type() == DiscretizationType.DISU:
except_str = 'ERROR: Model "{}" is unstructured and does not ' \
'have a consistant number of cells per ' \
'have a consistent number of cells per ' \
'layer.'.format(self._model_name)
print(except_str)
raise MFGridException(except_str)

View File

@ -333,7 +333,7 @@ description REPLACE boundname {'{#1}': 'lake'}
block connectiondata
name connectiondata
type recarray lakeno iconn cellid claktype bedleak belev telev connlen connwidth
shape (sum(nlakecon))
shape (sum(nlakeconn))
reader urword
longname
description
@ -378,7 +378,7 @@ tagged false
in_record true
reader urword
longname lake connection type
description character string that defines the lake-GWF connection type for the lake connection. Possible lake-GWF connection type strings include: VERTICAL--character keyword to indicate the lake-GWF connection is vertical and connection conductance calculations use the hydraulic conductivity corresponding to the $K_{33}$ tensor component defined for CELLID in the NPF package. HORIZONTAL--character keyword to indicate the lake-GWF connection is horizontal and connection conductance calculations use the hydraulic conductivity corresponding to the $K_{11}$ tensor component defined for CELLID in the NPF package. EMBEDDEDH--character keyword to indicate the lake-GWF connection is embedded in a single cell and connection conductance calculations use the hydraulic conductivity corresponding to the $K_{11}$ tensor component defined for CELLID in the NPF package. EMBEDDEDV--character keyword to indicate the lake-GWF connection is embedded in a single cell and connection conductance calculations use the hydraulic conductivity corresponding to the $K_{33}$ tensor component defined for CELLID in the NPF package. Embedded lakes can only be connected to a single cell (NLAKCONN = 1) and there must be a lake table associated with each embedded lake.
description character string that defines the lake-GWF connection type for the lake connection. Possible lake-GWF connection type strings include: VERTICAL--character keyword to indicate the lake-GWF connection is vertical and connection conductance calculations use the hydraulic conductivity corresponding to the $K_{33}$ tensor component defined for CELLID in the NPF package. HORIZONTAL--character keyword to indicate the lake-GWF connection is horizontal and connection conductance calculations use the hydraulic conductivity corresponding to the $K_{11}$ tensor component defined for CELLID in the NPF package. EMBEDDEDH--character keyword to indicate the lake-GWF connection is embedded in a single cell and connection conductance calculations use the hydraulic conductivity corresponding to the $K_{11}$ tensor component defined for CELLID in the NPF package. EMBEDDEDV--character keyword to indicate the lake-GWF connection is embedded in a single cell and connection conductance calculations use the hydraulic conductivity corresponding to the $K_{33}$ tensor component defined for CELLID in the NPF package. Embedded lakes can only be connected to a single cell (NLAKECONN = 1) and there must be a lake table associated with each embedded lake.
block connectiondata
name bedleak

View File

@ -78,7 +78,7 @@ valid
reader urword
optional true
longname steady state indicator
description keyword to indicate that stress-period IPER is steady-state. Steady-state conditions will apply until the TRANSIENT keyword is specified in a subsequent BEGIN PERIOD block.
description keyword to indicate that stress period IPER is steady-state. Steady-state conditions will apply until the TRANSIENT keyword is specified in a subsequent BEGIN PERIOD block.
block period
name transient
@ -88,5 +88,5 @@ valid
reader urword
optional true
longname transient indicator
description keyword to indicate that stress-period IPER is transient. Transient conditions will apply until the STEADY-STATE keyword is specified in a subsequent BEGIN PERIOD block.
description keyword to indicate that stress period IPER is transient. Transient conditions will apply until the STEADY-STATE keyword is specified in a subsequent BEGIN PERIOD block.

View File

@ -258,7 +258,7 @@ type integer
reader urword
optional false
longname number of wave sets
description is the number of UZF cells specified. NWAVSETS has a default value of 40 and can be increased if more waves are required to resolve variations in water content within the unsaturated zone.
description is the number of UZF cells specified. NWAVESETS has a default value of 40 and can be increased if more waves are required to resolve variations in water content within the unsaturated zone.
# --------------------- gwf uzf packagedata ---------------------

View File

@ -14,7 +14,7 @@ type integer
reader urword
optional false
longname number of table columns
description integer value specifying the number of colums in the lake table. There must be NCOL columns of data in the TABLE block. For lakes with HORIZONTAL and/or VERTICAL CTYPE connections, NCOL must be equal to 3. For lakes with EMBEDDEDH or EMBEDDEDV CTYPE connections, NCOL must be equal to 4.
description integer value specifying the number of columns in the lake table. There must be NCOL columns of data in the TABLE block. For lakes with HORIZONTAL and/or VERTICAL CTYPE connections, NCOL must be equal to 3. For lakes with EMBEDDEDH or EMBEDDEDV CTYPE connections, NCOL must be equal to 4.
# --------------------- gwf laktab table ---------------------

View File

@ -1329,7 +1329,7 @@ class DataStorage(object):
# currently only support files containing ndarrays
if self.data_structure_type != DataStructureType.ndarray:
path = self.data_dimensions.structure.path
message= 'Can not convert {} to internal data. Exernal to ' \
message= 'Can not convert {} to internal data. External to ' \
'internal file operations currently only supported ' \
'for ndarrays.'.format(path[-1])
type_, value_, traceback_ = sys.exc_info()
@ -2406,7 +2406,7 @@ class MFData(object):
<package>, <block>, <data>)
dimensions : DataDimensions
object used to retrieve dimension information about data
*arges, **kwargs : exists to support different child class parameter sets
*args, **kwargs : exists to support different child class parameter sets
with extra init parameters
Attributes

View File

@ -487,7 +487,7 @@ class MFList(mfdata.MFMultiDimVar):
k_data_item.possible_cellid,
k_data_item))
except Exception as ex:
message = 'An error occured ' \
message = 'An error occurred ' \
'while converting data '\
'to a string. This ' \
'error occurred while ' \
@ -543,7 +543,7 @@ class MFList(mfdata.MFMultiDimVar):
possible_cellid,
data_item))
except Exception as ex:
message = 'An error occured while ' \
message = 'An error occurred while ' \
'converting data to a ' \
'string. ' \
'This error occurred while ' \
@ -705,7 +705,7 @@ class MFList(mfdata.MFMultiDimVar):
arr_line[0][:3].upper() == 'END'):
# end of block
if store_data:
# store as rec array
# store as recarray
storage.set_data(data_loaded, self._current_key)
self._data_dimensions.unlock()
return [False, line]
@ -773,7 +773,7 @@ class MFList(mfdata.MFMultiDimVar):
self._simulation_data.debug, ex)
line_num += 1
if store_data:
# store as rec array
# store as recarray
storage.set_data(data_loaded, self._current_key)
self._data_dimensions.unlock()
return [False, None]
@ -1100,8 +1100,8 @@ class MFList(mfdata.MFMultiDimVar):
def _append_data(self, data_item, arr_line, arr_line_len, data_index,
var_index, repeat_count):
# append to a 2-D list which will later be converted to a numpy
# rec array
storge = self._get_storage_obj()
# recarray
storage = self._get_storage_obj()
self._last_line_info.append([])
if data_item.is_cellid or (data_item.possible_cellid and
self._validate_cellid(arr_line,
@ -1172,8 +1172,8 @@ class MFList(mfdata.MFMultiDimVar):
traceback_, comment,
self._simulation_data.debug)
data_converted = storge.convert_data(arr_line[index],
data_item.type)
data_converted = storage.convert_data(arr_line[index],
data_item.type)
cellid_tuple = cellid_tuple + (int(data_converted) - 1,)
self._last_line_info[-1].append([index, 'integer',
cellid_size])
@ -1199,12 +1199,12 @@ class MFList(mfdata.MFMultiDimVar):
data_converted = arr_line[data_index].lower()
# override recarray data type to support writing
# string values
storge.override_data_type(var_index, object)
storage.override_data_type(var_index, object)
self._last_line_info[-1].append([data_index, 'string', 0])
else:
data_converted = storge.convert_data(arr_line[data_index],
data_item.type,
data_item)
data_converted = storage.convert_data(arr_line[data_index],
data_item.type,
data_item)
self._last_line_info[-1].append([data_index,
data_item.type, 0])
self._data_line = self._data_line + (data_converted,)

View File

@ -106,11 +106,11 @@ class MFScalar(mfdata.MFData):
if (isinstance(data, list) or isinstance(data, tuple)) and \
len(data) > 1:
self._add_data_line_comment(data[1:], 0)
storge = self._get_storage_obj()
storage = self._get_storage_obj()
data_struct = self.structure.data_item_structures[0]
try:
converted_data = storge.convert_data(data, self._data_type,
data_struct)
converted_data = storage.convert_data(data, self._data_type,
data_struct)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
comment = 'Could not convert data "{}" to type ' \
@ -124,7 +124,7 @@ class MFScalar(mfdata.MFData):
value_, traceback_, comment,
self._simulation_data.debug, ex)
try:
storge.set_data(converted_data, key=self._current_key)
storage.set_data(converted_data, key=self._current_key)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
comment = 'Could not set data "{}" to type ' \

View File

@ -132,7 +132,7 @@ class ArrayTemplateGenerator(TemplateGenerator):
data_struct, data_dimensions = self._get_data_dimensions(model)
datum_type = data_struct.get_datum_type()
data_type = data_struct.get_datatype()
# build a temporary data storge object
# build a temporary data storage object
data_storage = mfdata.DataStorage(
model.simulation_data, data_dimensions, None,
mfdata.DataStorageType.internal_array,
@ -268,7 +268,7 @@ class ListTemplateGenerator(TemplateGenerator):
data_struct, data_dimensions = self._get_data_dimensions(model)
data_type = data_struct.get_datatype()
# build a temporary data storge object
# build a temporary data storage object
data_storage = mfdata.DataStorage(
model.simulation_data, data_dimensions, None,
mfdata.DataStorageType.internal_array,
@ -290,7 +290,7 @@ class ListTemplateGenerator(TemplateGenerator):
for index in range(0, len(type_list)):
type_list[index] = (type_list[index][0], object)
# build rec array
# build recarray
template_data = self._build_template_data(type_list)
rec_array_data = []
if maxbound is not None:
@ -366,7 +366,7 @@ class ArrayUtil(object):
compares two lists, returns true if they are identical (with max_error)
spilt_data_line : (line : string) : list
splits a string apart (using split) and then cleans up the results
dealing with various MODFLOW input file releated delimiters. returns
dealing with various MODFLOW input file related delimiters. returns
the delimiter type used.
clean_numeric : (text : string) : string
returns a cleaned up version of 'text' with only numeric characters
@ -506,7 +506,7 @@ class ArrayUtil(object):
else:
clean_line = line.strip().split()
if external_file:
# try lots of different delimitiers for external files and use the
# try lots of different delimiters for external files and use the
# one the breaks the data apart the most
max_split_size = len(clean_line)
max_split_type = None

View File

@ -164,7 +164,7 @@ class DfnPackage(Dfn):
returns flag for multi-package support
get_block_structure_dict : (path : tuple, common : bool, model_file :
bool) : dict
returns a dictionray of block structure information for the package
returns a dictionary of block structure information for the package
See Also
--------
@ -371,7 +371,7 @@ class DfnFile(Dfn):
the data item name as the dictionary key
get_block_structure_dict : (path : tuple, common : bool, model_file :
bool) : dict
returns a dictionray of block structure information for the package
returns a dictionary of block structure information for the package
See Also
--------

View File

@ -596,7 +596,7 @@ class MFModel(PackageContainer):
if len(pkg_type) > 3 and pkg_type[-1] == 'A':
pkg_type = pkg_type[0:-1]
# Model Assumption - assuming all name files have a package
# rec array
# recarray
self.name_file.packages.\
update_record(['{}6'.format(pkg_type), package.filename,
package.package_name], 0)

View File

@ -168,7 +168,7 @@ class ModflowGwflak(mfpackage.MFPackage):
connection conductance calculations use the hydraulic conductivity
corresponding to the :math:`K_{33}` tensor component defined for
CELLID in the NPF package. Embedded lakes can only be connected to a
single cell (NLAKCONN = 1) and there must be a lake table associated
single cell (NLAKECONN = 1) and there must be a lake table associated
with each embedded lake.
* bedleak (double) character string or real value that defines the bed
leakance for the lake-GWF connection. BEDLEAK must be greater than or
@ -486,7 +486,7 @@ class ModflowGwflak(mfpackage.MFPackage):
["block connectiondata", "name connectiondata",
"type recarray lakeno iconn cellid claktype bedleak belev telev "
"connlen connwidth",
"shape (sum(nlakecon))", "reader urword"],
"shape (sum(nlakeconn))", "reader urword"],
["block connectiondata", "name lakeno", "type integer", "shape",
"tagged false", "in_record true", "reader urword",
"numeric_index true"],

View File

@ -38,11 +38,11 @@ class ModflowGwfsto(mfpackage.MFPackage):
than or equal to 0. Specific yield does not have to be specified if
there are no convertible cells (ICONVERT=0 in every cell).
steady_state : boolean
* steady-state (boolean) keyword to indicate that stress-period IPER is
* steady-state (boolean) keyword to indicate that stress period IPER is
steady-state. Steady-state conditions will apply until the TRANSIENT
keyword is specified in a subsequent BEGIN PERIOD block.
transient : boolean
* transient (boolean) keyword to indicate that stress-period IPER is
* transient (boolean) keyword to indicate that stress period IPER is
transient. Transient conditions will apply until the STEADY-STATE
keyword is specified in a subsequent BEGIN PERIOD block.
fname : String

View File

@ -108,7 +108,7 @@ class ModflowGwfuzf(mfpackage.MFPackage):
has a default value of 7 and can be increased to lower mass balance
error in the unsaturated zone.
nwavesets : integer
* nwavesets (integer) is the number of UZF cells specified. NWAVSETS
* nwavesets (integer) is the number of UZF cells specified. NWAVESETS
has a default value of 40 and can be increased if more waves are
required to resolve variations in water content within the
unsaturated zone.

View File

@ -20,7 +20,7 @@ class ModflowUtllaktab(mfpackage.MFPackage):
* nrow (integer) integer value specifying the number of rows in the
lake table. There must be NROW rows of data in the TABLE block.
ncol : integer
* ncol (integer) integer value specifying the number of colums in the
* ncol (integer) integer value specifying the number of columns in the
lake table. There must be NCOL columns of data in the TABLE block.
For lakes with HORIZONTAL and/or VERTICAL CTYPE connections, NCOL
must be equal to 3. For lakes with EMBEDDEDH or EMBEDDEDV CTYPE

View File

@ -81,7 +81,7 @@ class MFOutputRequester:
if key in self.dataDict:
if (key[0], 'disv', 'dimensions', 'nvert') in self.mfdict:
self.querybinarydata = \
self._querybinarydata_verticed(self.mfdict, key)
self._querybinarydata_vertices(self.mfdict, key)
elif (key[0], 'disu', 'connectiondata', 'iac') in self.mfdict:
self.querybinarydata = self._querybinarydata_unstructured(key)
else:
@ -108,7 +108,7 @@ class MFOutputRequester:
else:
return np.array(bindata.get_alldata())
def _querybinarydata_verticed(self, mfdict, key):
def _querybinarydata_vertices(self, mfdict, key):
# Basic definition to get output data from binary output files for
# simulations that define grid by vertices
path = self.dataDict[key]
@ -198,7 +198,7 @@ class MFOutputRequester:
Returns
-------
information defining specifice vertices for all model cells to be added
information defining specified vertices for all model cells to be added
to xarray as coordinates.
cellid: (list) corresponds to the modflow CELL2d cell number
xcyc: (n x 2) dimensional Pandas object of tuples defining the CELL2d
@ -209,9 +209,9 @@ class MFOutputRequester:
yv: (n x nverts) dimensional Pandas object of tuples. Contains y
vertices for a cell
topv: (n x nlayers) dimensional Pandas object of cell top elevations
coresponding to a row column location
corresponding to a row column location
botmv: (n x nlayers) dimensional Pandas object of cell bottom
elevations coresponding to a row column location
elevations corresponding to a row column location
"""
try:
@ -351,7 +351,7 @@ class MFOutputRequester:
def _reshape_binary_data(data, dtype=None):
# removes unneccesary dimensions from data returned by
# removes unnecessary dimensions from data returned by
# flopy.utils.binaryfile
time = len(data)
data = np.array(data)

View File

@ -57,7 +57,7 @@ class Observations:
parameters:
-----------
text = (str) specific modflow record name contained in Obs.out file
idx = (int), (slice(start, stop)) interger or slice of data to be
idx = (int), (slice(start, stop)) integer or slice of data to be
returned. corresponds to kstp*kper - 1
totim = (float) model time value to return data from
@ -179,7 +179,7 @@ class Observations:
Parameters
----------
keys: (string) sting of dictionary/observation keys seperated by comma.
keys: (string) string of dictionary/observation keys separated by comma.
(optional)
idx: (int) time index location (optional)
totim: (float) simulation time (optional)
@ -270,7 +270,7 @@ class Observations:
elif key_type is list:
pass
else:
err = 'Invalid key type: supply a strg of keys seperated by , ' \
err = 'Invalid key type: supply a string of keys separated by , ' \
'or a list of keys'
raise TypeError(err)
return keys
@ -433,7 +433,7 @@ class MFObservationRequester:
----------
partial_key: (list) partial dictionary key
OBS8: (string) OBS8 mfdict key name
obstype: (string) SINGLE or CONTINUOS
obstype: (string) SINGLE or CONTINUOUS
Returns:
--------

View File

@ -749,7 +749,7 @@ class VertexSpatialReference(object):
def _get_rotated_vertices(self):
"""
Adjusts position and rotates verticies if applicable
Adjusts position and rotates vertices if applicable
Returns
-------
@ -862,7 +862,7 @@ class SpatialReference(object):
'implemented')
else:
raise TypeError('Discretation type {} not '
raise TypeError('Discretization type {} not '
'supported'.format(distype))
return new

View File

@ -23,7 +23,7 @@ class ModflowBcf(Package):
(default is 53)
intercellt : int
Intercell transmissivities, harmonic mean (0), arithmetic mean (1),
logarithmetic mean (2), combination (3). (default is 0)
logarithmic mean (2), combination (3). (default is 0)
laycon : int
Layer type, confined (0), unconfined (1), constant T, variable S (2),
variable T, variable S (default is 3)
@ -39,7 +39,7 @@ class ModflowBcf(Package):
iwetit : int
iteration interval in wetting/drying algorithm (default is 1)
ihdwet : int
flag to indicate how initial head is computd for cells that become
flag to indicate how initial head is computed for cells that become
wet (default is 0)
tran : float or array of floats (nlay, nrow, ncol), optional
transmissivity (only read if laycon is 0 or 2) (default is 1.0)

View File

@ -175,7 +175,7 @@ class ModflowChd(Package):
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
# get an empty recaray that corresponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowChd.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)

View File

@ -88,7 +88,7 @@ class ModflowDis(Package):
PROJ4 string that defines the xul-yul coordinate system
(.e.g. '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ').
Can be an EPSG code (e.g. 'EPSG:4326'). Default is 'EPSG:4326'
start_dateteim : str
start_datetime : str
starting datetime of the simulation. default is '1/1/1970'
Attributes
@ -397,7 +397,7 @@ class ModflowDis(Package):
y[r] = self.delc[r] / 2.
else:
y[r] = y[r - 1] + (self.delc[r] + self.delc[r - 1]) / 2.
# Invert y to convert to a cartesian coordiante system
# Invert y to convert to a Cartesian coordinate system
y = y[::-1]
# In column direction
x = np.empty((self.ncol))

View File

@ -212,7 +212,7 @@ class ModflowDrn(Package):
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
# get an empty recaray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowDrn.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)

View File

@ -209,7 +209,7 @@ class ModflowFhb(Package):
raise TypeError(msg)
elif isinstance(ds5, list):
ds5 = np.array(ds5)
# convert numpy array to a rec array
# convert numpy array to a recarray
if ds5.dtype != dtype:
ds5 = np.core.records.fromarrays(ds5.transpose(), dtype=dtype)
@ -225,7 +225,7 @@ class ModflowFhb(Package):
raise TypeError(msg)
elif isinstance(ds7, list):
ds7 = np.array(ds7)
# convert numpy array to a rec array
# convert numpy array to a recarray
if ds7.dtype != dtype:
ds7 = np.core.records.fromarrays(ds7.transpose(), dtype=dtype)
@ -281,7 +281,7 @@ class ModflowFhb(Package):
@staticmethod
def get_empty(ncells=0, nbdtim=1, structured=True, head=False):
# get an empty recarray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim,
structured=structured, head=head)
return create_empty_recarray(ncells, dtype, default_value=-1.0E+10)
@ -679,7 +679,7 @@ class ModflowFhb(Package):
model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# auxillary data are not passed to load instantiation
# auxiliary data are not passed to load instantiation
nfhbx1 = 0
nfhbx2 = 0

View File

@ -129,7 +129,7 @@ class ModflowGage(Package):
for n in range(numgage):
files.append(filenames[n+1])
# convert gage_data to a recarry, if necessary
# convert gage_data to a recarray, if necessary
if isinstance(gage_data, np.ndarray):
if not gage_data.dtype == dtype:
gage_data = np.core.records.fromarrays(
@ -204,7 +204,7 @@ class ModflowGage(Package):
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
# get an empty recaray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowGage.get_default_dtype()
return create_empty_recarray(ncells, dtype, default_value=-1.0E+10)

View File

@ -169,7 +169,7 @@ class ModflowGhb(Package):
def ncells(self):
"""
Returns the maximum number of cells that have a ghb cell
(developped for MT3DMS SSM package)
(developed for MT3DMS SSM package)
"""
return self.stress_period_data.mxact
@ -206,7 +206,7 @@ class ModflowGhb(Package):
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
# get an empty recaray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowGhb.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)

View File

@ -196,7 +196,7 @@ class ModflowHfb(Package):
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
"""
Get an empty recarray that correponds to hfb dtype and has
Get an empty recarray that corresponds to hfb dtype and has
been extended to include aux variables and associated
aux names.

View File

@ -628,7 +628,7 @@ class HeadObservation(object):
d : np.recarray
"""
# get an empty recaray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = self._get_dtype()
d = create_empty_recarray(ncells, dtype, default_value=-1.0E+10)
d['obsname'] = ''

View File

@ -35,7 +35,7 @@ class ModflowHyd(Package):
is located may be a no-flow cell. (default is -999.)
obsdata : list of lists, numpy array, or numpy recarray (nhyd, 7)
Each row of obsdata includes data defining pckg (3 character string),
arr (2 characater string), intyp (1 character string) klay (int),
arr (2 character string), intyp (1 character string) klay (int),
xl (float), yl (float), hydlbl (14 character string) for each observation.
pckg : str
@ -246,7 +246,7 @@ class ModflowHyd(Package):
@staticmethod
def get_empty(ncells=0):
# get an empty recaray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowHyd.get_default_dtype()
return create_empty_recarray(ncells, dtype)

View File

@ -60,7 +60,7 @@ class ModflowLak(Package):
is a steady state stress period as defined in Ss/tr in the
Discretization file.
* SSCNCR and NSSITR can be read for a transient only simulation by
placing a negative sign immeditately in front of THETA. A negative
placing a negative sign immediately in front of THETA. A negative
THETA sets a flag which assumes input values for NSSITR and SSCNCR
will follow THETA in the format as described by Merritt and Konikow
(p. 52). A negative THETA is automatically reset to a positive
@ -77,7 +77,7 @@ class ModflowLak(Package):
periods is a steady state stress period as defined in Ss/tr in the
Discretization file.
* SSCNCR and NSSITR can be read for a transient only simulation by
placing a negative sign immeditately in front of THETA. A negative
placing a negative sign immediately in front of THETA. A negative
THETA sets a flag which assumes input values for NSSITR and SSCNCR
will follow THETA in the format as described by Merritt and Konikow
(p. 52). A negative THETA is automatically reset to a positive
@ -168,7 +168,7 @@ class ModflowLak(Package):
system being described in this record. The center lake number
is listed first.
And dataset 8b contains
SILLVT : sequnce of floats
SILLVT : sequence of floats
A sequence of sill elevations for each sublakes that determines
whether the center lake is connected with a given sublake.
Values are entered for each sublake in the order the sublakes
@ -210,7 +210,7 @@ class ModflowLak(Package):
augmentation. Normally, this would be used to specify the
rate of artificial withdrawal from a lake for human water use,
or if negative, artificial augmentation of a lake volume for
esthetic or recreational purposes.
aesthetic or recreational purposes.
SSMN : float
Minimum stage allowed for each lake in steady-state solution.
See notes on ssmn and ssmx above.

View File

@ -33,7 +33,7 @@ class ModflowMnw1(Package):
head loss type for each well
wel1_bynode_qsum : list of lists or None
nested list containing file names, unit numbers, and ALLTIME flag for
auxilary output, e.g. [['test.ByNode',92,'ALLTIME']]
auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']]
if None, these optional external filenames and unit numbers are not written out
itmp : array
number of wells to be simulated for each stress period (shape : (NPER))
@ -122,7 +122,7 @@ class ModflowMnw1(Package):
self.nomoiter = nomoiter #-integer indicating the number of iterations for which flow in MNW wells is calculated
self.kspref = kspref #-alphanumeric key indicating which set of water levels are to be used as reference values for calculating drawdown
self.losstype = losstype #-string indicating head loss type for each well
self.wel1_bynode_qsum = wel1_bynode_qsum #-nested list containing file names, unit numbers, and ALLTIME flag for auxilary output, e.g. [['test.ByNode',92,'ALLTIME']]
self.wel1_bynode_qsum = wel1_bynode_qsum #-nested list containing file names, unit numbers, and ALLTIME flag for auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']]
#if stress_period_data is not None:
# for per, spd in stress_period_data.items():
# for n in spd.dtype.names:
@ -150,7 +150,7 @@ class ModflowMnw1(Package):
@staticmethod
def get_empty_stress_period_data(itmp, structured=True,
default_value=0):
# get an empty recarray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowMnw1.get_default_dtype(structured=structured)
return create_empty_recarray(itmp, dtype, default_value=default_value)
@ -481,4 +481,4 @@ def _parse_5(f, itmp,
def _write_5(f, spd):
f.write('{:d} {:d} {:d} {}')
pass
pass

View File

@ -103,7 +103,7 @@ class Mnw(object):
The number of entries (lines) in dataset 2h corresponds to the value of PUMPCAP.
If PUMPCAP does not equal 0, it must be set to an integer value of between 1 and 25, inclusive.
rw : float
radius of the well (losstype == 'THEIM', 'SKIN', or 'GENERAL')
radius of the well (losstype == 'THIEM', 'SKIN', or 'GENERAL')
rskin : float
radius to the outer limit of the skin (losstype == 'SKIN')
kskin : float
@ -158,7 +158,7 @@ class Mnw(object):
zbotm : float
bottom elevation of open intervals of vertical well.
wellid : str
losstyp : str
losstype : str
pumploc : int
qlimit : int
ppflag : int
@ -215,7 +215,7 @@ class Mnw(object):
qcut : int
qfrcmn : float
qfrcmx : float
Note: If auxillary variables are also being used, additional columns for these must be included.
Note: If auxiliary variables are also being used, additional columns for these must be included.
pumplay : int
pumprow : int
pumpcol : int
@ -398,7 +398,7 @@ class Mnw(object):
def get_empty_stress_period_data(nper=0, aux_names=None, structured=True,
default_value=0):
"""
Get an empty stress_period_data recarray that correponds to dtype
Get an empty stress_period_data recarray that corresponds to dtype
Parameters
----------
@ -867,7 +867,7 @@ class ModflowMnw2(Package):
self.nodtot = nodtot # user-specified maximum number of nodes
self.ipakcb = ipakcb
self.mnwprnt = int(mnwprnt) # -verbosity flag
self.aux = aux # -list of optional auxilary parameters
self.aux = aux # -list of optional auxiliary parameters
# Datasets 2-4 are contained in node_data and stress_period_data tables
# and/or in Mnw objects
@ -880,7 +880,7 @@ class ModflowMnw2(Package):
n in self.node_data.dtype.names]
for n in names:
self.node_data[n] = node_data[
n] # rec array of Mnw properties by node
n] # recarray of Mnw properties by node
self.nodtot = len(self.node_data)
self.node_data.sort(order=['wellid', 'k'])
# Python 3.5.0 produces a segmentation fault when trying to sort BR MNW wells
@ -934,7 +934,7 @@ class ModflowMnw2(Package):
def get_empty_node_data(maxnodes=0, aux_names=None, structured=True,
default_value=0):
"""
get an empty recarray that correponds to dtype
get an empty recarray that corresponds to dtype
Parameters
----------
@ -1326,7 +1326,7 @@ class ModflowMnw2(Package):
def make_node_data(self, mnwobjs):
"""
Make node_data rec array from Mnw objects
Make node_data recarray from Mnw objects
Parameters
----------
@ -1349,7 +1349,7 @@ class ModflowMnw2(Package):
def make_stress_period_data(self, mnwobjs):
"""
Make stress_period_data rec array from Mnw objects
Make stress_period_data recarray from Mnw objects
Parameters
----------

View File

@ -446,7 +446,7 @@ class ModflowOc(Package):
[100, 101]
"""
# set iubud by iterating through the pacakages
# set iubud by iterating through the packages
self._set_budgetunit()
return self.iubud

View File

@ -50,7 +50,7 @@ class ModflowPbc(Package):
self.parent.add_package(self)
def ncells(self):
# Returns the maximum number of cells that have recharge (developped for MT3DMS SSM package)
# Returns the maximum number of cells that have recharge (developed for MT3DMS SSM package)
return self.mxactp
def write_file(self):

View File

@ -231,7 +231,7 @@ class ModflowRiv(Package):
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
# get an empty recarray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowRiv.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)

View File

@ -523,7 +523,7 @@ class ModflowSfr2(Package):
@staticmethod
def get_empty_reach_data(nreaches=0, aux_names=None, structured=True,
default_value=0.):
# get an empty recarray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowSfr2.get_default_reach_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
@ -533,7 +533,7 @@ class ModflowSfr2(Package):
@staticmethod
def get_empty_segment_data(nsegments=0, aux_names=None, default_value=0.):
# get an empty recarray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype = ModflowSfr2.get_default_segment_dtype()
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
@ -707,7 +707,7 @@ class ModflowSfr2(Package):
channel_geometry_data = {}
channel_flow_data = {}
dataset_5 = {}
aux_variables = {} # not sure where the auxillary variables are supposed to go
aux_variables = {} # not sure where the auxiliary variables are supposed to go
for i in range(0, nper):
# Dataset 5
dataset_5[i] = _get_dataset(next(f), [1, 0, 0, 0])
@ -716,7 +716,7 @@ class ModflowSfr2(Package):
# Item 6
current = ModflowSfr2.get_empty_segment_data(nsegments=itmp,
aux_names=option)
current_aux = {} # container to hold any auxillary variables
current_aux = {} # container to hold any auxiliary variables
current_6d = {} # these could also be implemented as structured arrays with a column for segment number
current_6e = {}
#print(i,icalc,nstrm,isfropt,reachinput)
@ -1104,7 +1104,7 @@ class ModflowSfr2(Package):
self.reach_data['slope'] = slopes
def get_upsegs(self):
"""From segment_data, returns nested dict of all upstream segments by segemnt,
"""From segment_data, returns nested dict of all upstream segments by segment,
by stress period.
Returns
@ -1341,7 +1341,7 @@ class ModflowSfr2(Package):
Returns
-------
headwaters : np.ndarray (1-D)
One dimmensional array listing all headwater segments.
One dimensional array listing all headwater segments.
"""
upsegs = [self.segment_data[per].nseg[
self.segment_data[per].outseg == s].tolist()
@ -1359,7 +1359,7 @@ class ModflowSfr2(Package):
(e.g. hcond1 for hydraulic conductivity)
For segments with icalc=2 (specified channel geometry); if width1 is given,
the eighth distance point (XCPT8) from dataset 6d will be used as the stream width.
For icalc=3, an abitrary width of 5 is assigned.
For icalc=3, an arbitrary width of 5 is assigned.
For icalc=4, the mean value for width given in item 6e is used.
segvar2 : str
Column/variable name in segment_data array for representing start of segment
@ -1370,7 +1370,7 @@ class ModflowSfr2(Package):
Returns
-------
reach_values : 1D array
One dimmensional array of interpolated values of same length as reach_data array.
One dimensional array of interpolated values of same length as reach_data array.
For example, hcond1 and hcond2 could be entered as inputs to get values for the
strhc1 (hydraulic conductivity) column in reach_data.
@ -2170,7 +2170,7 @@ class check:
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.strtop).max() == 0:
txt += 'isfropt setting of 1,2 or 3 requries strtop information!\n'
txt += 'isfropt setting of 1,2 or 3 requires strtop information!\n'
else:
is_less = self.reach_data.strtop < min_strtop
if np.any(is_less):
@ -2197,7 +2197,7 @@ class check:
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.strtop).max() == 0:
txt += 'isfropt setting of 1,2 or 3 requries strtop information!\n'
txt += 'isfropt setting of 1,2 or 3 requires strtop information!\n'
else:
is_greater = self.reach_data.strtop > max_strtop
if np.any(is_greater):
@ -2339,7 +2339,7 @@ class check:
print(headertxt.strip())
txt = ''
if self.sfr.parent.dis is None:
txt += 'No DIS file supplied; cannot check SFR elevations agains model grid.'
txt += 'No DIS file supplied; cannot check SFR elevations against model grid.'
self._txt_footer(headertxt, txt, '')
return
passed = False
@ -2461,7 +2461,7 @@ class check:
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.slope).max() == 0:
txt += 'isfropt setting of 1,2 or 3 requries slope information!\n'
txt += 'isfropt setting of 1,2 or 3 requires slope information!\n'
else:
is_less = self.reach_data.slope < minimum_slope
if np.any(is_less):
@ -2488,7 +2488,7 @@ class check:
passed = False
if self.sfr.isfropt in [1, 2, 3]:
if np.diff(self.reach_data.slope).max() == 0:
txt += 'isfropt setting of 1,2 or 3 requries slope information!\n'
txt += 'isfropt setting of 1,2 or 3 requires slope information!\n'
else:
is_greater = self.reach_data.slope > maximum_slope
@ -2530,7 +2530,7 @@ def _check_numbers(n, numbers, level=1, datatype='reach'):
txt += 'Invalid {} numbering\n'.format(datatype)
if level == 1:
non_consecutive = np.append(np.diff(numbers) != 1,
False) # consistent dimmension for boolean array
False) # consistent dimension for boolean array
gaps = num_range[non_consecutive] + 1
if len(gaps) > 0:
gapstr = ' '.join(map(str, gaps))
@ -2579,12 +2579,12 @@ def _get_dataset(line, dataset):
def _get_duplicates(a):
"""Returns duplcate values in an array, similar to pandas .duplicated() method
"""Returns duplicate values in an array, similar to pandas .duplicated() method
http://stackoverflow.com/questions/11528078/determining-duplicate-values-in-an-array
"""
s = np.sort(a, axis=None)
equal_to_previous_item = np.append(s[1:] == s[:-1],
False) # maintain same dimmension for boolean array
False) # maintain same dimension for boolean array
return np.unique(s[equal_to_previous_item])
@ -2745,7 +2745,7 @@ def _parse_1c(line, reachinput, transroute):
weight = float(line.pop(0))
flwtol = float(line.pop(0))
# auxillary variables (MODFLOW-LGR)
# auxiliary variables (MODFLOW-LGR)
option = [line[i] for i in np.arange(1, len(line)) if
'aux' in line[i - 1].lower()]

View File

@ -66,7 +66,7 @@ class ModflowStr(Package):
datasets 6 and 8.
The value for stress period data for a stress period can be an integer
(-1 or 0), a list of lists, a numpy array, or a numpy recarry. If
(-1 or 0), a list of lists, a numpy array, or a numpy recarray. If
stress period data for a stress period contains an integer, a -1 denotes
data from the previous stress period will be reused and a 0 indicates
there are no str reaches for this stress period.
@ -119,7 +119,7 @@ class ModflowStr(Package):
a integer value is specified for stress period data.
The value for segment data for a stress period can be an integer
(-1 or 0), a list of lists, a numpy array, or a numpy recarry. If
(-1 or 0), a list of lists, a numpy array, or a numpy recarray. If
segment data for a stress period contains an integer, a -1 denotes
data from the previous stress period will be reused and a 0 indicates
there are no str segments for this stress period.
@ -373,7 +373,7 @@ class ModflowStr(Package):
@staticmethod
def get_empty(ncells=0, nss=0, aux_names=None, structured=True):
# get an empty recarray that correponds to dtype
# get an empty recarray that corresponds to dtype
dtype, dtype2 = ModflowStr.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)

View File

@ -151,7 +151,7 @@ class ModflowSwt(Package):
by interbed system, vertical displacement, preconsolidation stress, change in
preconsolidation stress, geostatic stress, change in geostatic stress, effective
stress, void ratio, thickness of compressible sediments, and layer-center
elecation will be printed. If ids16 is None and iswtoc>0 then print code 0
elevation will be printed. If ids16 is None and iswtoc>0 then print code 0
will be used for all data which is output to the binary subsidence output file
(unit=1054). The 26 entries in ids16 correspond to ifm1, iun1, ifm2, iun2, ifm3,
iun3, ifm4, iun4, ifm5, iun5, ifm6, iun6, ifm7, iun7, ifm8, iun8, ifm9,

View File

@ -155,7 +155,7 @@ class ModflowUzf1(Package):
specifysurfk : boolean
(MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later)
An optional character variable. When SPECIFYSURFK is specified,
the variable SURFK is specfied in Data Set 4b.
the variable SURFK is specified in Data Set 4b.
rejectsurfk : boolean
(MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later)
An optional character variable. When REJECTSURFK is specified,
@ -177,7 +177,7 @@ class ModflowUzf1(Package):
for smoothfact (default is None).
For example, if the interval factor (smoothfact)
is specified as smoothfact=0.1 (recommended),
then the smoothing inerval will be calculated as:
then the smoothing interval will be calculated as:
SMOOTHINT = 0.1*EXTDP and is applied over the range for groundwater head (h):
* h < CELTOP-EXTDP, ET is zero;
* CELTOP-EXTDP < h < CELTOP-EXTDP+SMOOTHINT, ET is smoothed;
@ -519,7 +519,7 @@ class ModflowUzf1(Package):
return lst
def ncells(self):
# Returns the maximum number of cells that have recharge (developped for MT3DMS SSM package)
# Returns the maximum number of cells that have recharge (developed for MT3DMS SSM package)
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
return (nrow * ncol)

View File

@ -169,7 +169,7 @@ class Modpath(BaseModel):
are 'endpoint', 'pathline', and 'timeseries'.
(default is 'PATHLINE')
trackdir : str
Keywork that defines the MODPATH particle tracking direction.
Keyword that defines the MODPATH particle tracking direction.
Available trackdir's are 'backward' and 'forward'.
(default is 'forward')
packages : str or list of strings

View File

@ -402,7 +402,7 @@ class Modpath7(BaseModel):
Basename for MODPATH 7 input and output files (default is
'modpath7test').
trackdir : str
Keywork that defines the MODPATH particle tracking direction.
Keyword that defines the MODPATH particle tracking direction.
Available trackdir's are 'backward' and 'forward'.
(default is 'forward')
flowmodel : flopy.modflow.Modflow or flopy.mf6.MFModel object
@ -421,7 +421,7 @@ class Modpath7(BaseModel):
Number of particles in a cell in the row (y-coordinate)
direction (default is 2).
layercelldivisions : int
Number of oarticles in a cell in the layer (z-coordinate)
Number of particles in a cell in the layer (z-coordinate)
direction (default is 2).
nodes : int, list of ints, tuple of ints, or np.ndarray
Nodes (zero-based) with particles. If (default is node 0).

View File

@ -455,7 +455,7 @@ class FaceDataType(object):
rowdivisions5 : int
The number of row subdivisions that define the two-dimensional array
of particles on the bottom cell face (face 5) (default is 3).
columndivisons5 : int
columndivisions5 : int
The number of column subdivisions that define the two-dimensional array
of particles on the bottom cell face (face 5) (default is 3).
rowdivisions6 : int
@ -478,7 +478,7 @@ class FaceDataType(object):
verticaldivisions2=3, horizontaldivisions2=3,
verticaldivisions3=3, horizontaldivisions3=3,
verticaldivisions4=3, horizontaldivisions4=3,
rowdivisions5=3, columndivisons5=3,
rowdivisions5=3, columndivisions5=3,
rowdivisions6=3, columndivisions6=3):
"""
Class constructor
@ -498,7 +498,7 @@ class FaceDataType(object):
self.verticaldivisions4 = verticaldivisions4
self.horizontaldivisions4 = horizontaldivisions4
self.rowdivisions5 = rowdivisions5
self.columndivisons5 = columndivisons5
self.columndivisions5 = columndivisions5
self.rowdivisions6 = rowdivisions6
self.columndivisions6 = columndivisions6
return
@ -528,7 +528,7 @@ class FaceDataType(object):
self.verticaldivisions2, self.horizontaldivisions2,
self.verticaldivisions3, self.horizontaldivisions3,
self.verticaldivisions4, self.horizontaldivisions4,
self.rowdivisions5, self.columndivisons5,
self.rowdivisions5, self.columndivisions5,
self.rowdivisions6, self.columndivisions6)
f.write(line)
@ -557,7 +557,7 @@ class CellDataType(object):
Number of particles in a cell in the row (y-coordinate)
direction (default is 3).
layercelldivisions : int
Number of oarticles in a cell in the layer (z-coordinate)
Number of particles in a cell in the layer (z-coordinate)
direction (default is 3).
Examples

View File

@ -502,7 +502,7 @@ class Mt3dms(BaseModel):
verbose=verbose, model_ws=model_ws,
modflowmodel=modflowmodel)
files_succesfully_loaded = []
files_successfully_loaded = []
files_not_loaded = []
# read name file
@ -565,7 +565,7 @@ class Mt3dms(BaseModel):
ext_unit_dict=ext_unit_dict)
except Exception as e:
raise Exception('error loading BTN: {0}'.format(str(e)))
files_succesfully_loaded.append(btn.filename)
files_successfully_loaded.append(btn.filename)
if mt.verbose:
sys.stdout.write(' {:4s} package load...success\n'
.format(pck.name[0]))
@ -603,7 +603,7 @@ class Mt3dms(BaseModel):
try:
pck = item.package.load(item.filename, mt,
ext_unit_dict=ext_unit_dict)
files_succesfully_loaded.append(item.filename)
files_successfully_loaded.append(item.filename)
if mt.verbose:
sys.stdout.write(
' {:4s} package load...success\n'
@ -617,7 +617,7 @@ class Mt3dms(BaseModel):
else:
pck = item.package.load(item.filename, mt,
ext_unit_dict=ext_unit_dict)
files_succesfully_loaded.append(item.filename)
files_successfully_loaded.append(item.filename)
if mt.verbose:
sys.stdout.write(
' {:4s} package load...success\n'
@ -659,9 +659,9 @@ class Mt3dms(BaseModel):
if mt.verbose:
print(1 * '\n')
s = ' The following {0} packages were successfully loaded.' \
.format(len(files_succesfully_loaded))
.format(len(files_successfully_loaded))
print(s)
for fname in files_succesfully_loaded:
for fname in files_successfully_loaded:
print(' ' + os.path.basename(fname))
if len(files_not_loaded) > 0:
s = ' The following {0} packages were not loaded.'.format(

View File

@ -77,7 +77,7 @@ class Mt3dDsp(Package):
kwargs : dictionary
If a multi-species simulation, then dmcoef values can be specified for
other species as dmcoef2, dmcoef3, etc. For example:
dmcoef1=1.e-10, dmcoef2=4.e-10, ... If a value is not specifed, then
dmcoef1=1.e-10, dmcoef2=4.e-10, ... If a value is not specified, then
dmcoef is set to 0.0.
Attributes

View File

@ -57,7 +57,7 @@ class Mt3dLkt(Package):
the LAK input via the RNF variable appearing in record set 9a
and want to assign a non-zero concentration (default is zero)
associated with this specified source, use ISFBCTYP=2;
3 a Pump boundary condition. Users who specify a withdrawl
3 a Pump boundary condition. Users who specify a withdrawal
from a lake via the WTHDRW variable appearing in record set 9a
and want to assign a non-zero concentration (default is zero)
associated with this specified source, use ISFBCTYP=2;
@ -239,7 +239,7 @@ class Mt3dLkt(Package):
# List of concentrations associated with fluxes in/out of lake
# (Evap, precip, specified runoff into the lake, specified
# withdrawl directly from the lake
# withdrawal directly from the lake
if self.lk_stress_period_data is not None:
self.lk_stress_period_data.write_transient(f_lkt,
single_per=kper)

View File

@ -44,7 +44,7 @@ class Mt3dRct(Package):
rhob : float or array of floats (nlay, nrow, ncol)
rhob is the bulk density of the aquifer medium (unit, ML-3). rhob is
used if isothm = 1, 2, 3, 4, or 6. If rhob is not user-specified and
isothem is not 5 then rhob is set to 1.8e3. (default is None)
isothm is not 5 then rhob is set to 1.8e3. (default is None)
prsity2 : float or array of floats (nlay, nrow, ncol)
prsity2 is the porosity of the immobile domain (the ratio of pore
spaces filled with immobile fluids over the bulk volume of the aquifer

View File

@ -22,7 +22,7 @@ class Mt3dUzt(Package):
Is a flag that indicates whether or not ET is being simulated in the
UZF1 flow package (=0 indicates that ET is not being simulated).
If ET is not being simulated, IET informs FMI package not to look
for UZET and GWET arrays in the flow-tranpsort link file.
for UZET and GWET arrays in the flow-transport link file.
iuzfbnd : array of ints
Specifies which row/column indices variably-saturated transport will
be simulated in.

View File

@ -263,7 +263,7 @@ class Package(object):
chk = check(self, f=f, verbose=verbose, level=level)
active = chk.get_active()
# check for confined layers above convertable layers
# check for confined layers above convertible layers
confined = False
thickstrt = False
for option in self.options:
@ -303,7 +303,7 @@ class Package(object):
.format(name, mx), 'Warning')
# check for unusually high or low values of hydraulic conductivity
if self.layvka.sum() > 0: # convert vertical anistropy to Kv for checking
if self.layvka.sum() > 0: # convert vertical anisotropy to Kv for checking
vka = self.vka.array.copy()
for l in range(vka.shape[0]):
vka[l] *= self.hk.array[l] if self.layvka.array[
@ -355,7 +355,7 @@ class Package(object):
# only check specific yield for convertible layers
inds = np.array(
[True if l > 0 or l < 0 and 'THICKSRT' in self.options
[True if l > 0 or l < 0 and 'THICKSTRT' in self.options
else False for l in self.laytyp])
sarrays['sy'] = sarrays['sy'][inds, :, :]
active = active[inds, :, :]
@ -799,7 +799,7 @@ class Package(object):
"Package.load() error loading open/close file " + oc_filename + \
" :" + str(e))
assert current.shape[
0] == itmp, "Package.load() error: open/close rec array from file " + \
0] == itmp, "Package.load() error: open/close recarray from file " + \
oc_filename + " shape (" + str(current.shape) + \
") does not match itmp: {0:d}".format(
itmp)

View File

@ -625,10 +625,10 @@ class ModelMap(object):
Parameters
----------
pl : list of rec arrays or a single rec array
rec array or list of rec arrays is data returned from
pl : list of recarrays or a single recarray
recarray or list of recarrays is data returned from
modpathfile PathlineFile get_data() or get_alldata()
methods. Data in rec array is 'x', 'y', 'z', 'time',
methods. Data in recarray is 'x', 'y', 'z', 'time',
'k', and 'particleid'.
travel_time: float or str
travel_time is a travel time selection for the displayed
@ -780,7 +780,7 @@ class ModelMap(object):
Parameters
----------
ep : rec array
ep : recarray
A numpy recarray with the endpoint particle data from the
MODPATH 6 endpoint file
direction : str
@ -791,7 +791,7 @@ class ModelMap(object):
(l, r, c) to use to make a selection of particle endpoints.
The selection could be a well location to determine capture zone
for the well. If selection is None, all particle endpoints for
the user-sepcified direction will be plotted. (default is None)
the user-specified direction will be plotted. (default is None)
selection_direction : str
String defining is a selection should be made on starting or
ending particle locations. If selection is not None and
@ -937,10 +937,10 @@ class ModelMap(object):
Parameters
----------
ts : list of rec arrays or a single rec array
rec array or list of rec arrays is data returned from
ts : list of recarrays or a single recarray
recarray or list of recarrays is data returned from
modpathfile TimeseriesFile get_data() or get_alldata()
methods. Data in rec array is 'x', 'y', 'z', 'time',
methods. Data in recarray is 'x', 'y', 'z', 'time',
'k', and 'particleid'.
travel_time: float or str
travel_time is a travel time selection for the displayed

View File

@ -79,7 +79,7 @@ class SeawatVsc(Package):
amucoeff : float
is the coefficient array of size MUNCOEFF. AMUCOEFF is A in
equations 18, 19, and 20.
muncoef : int
muncoeff : int
is the size of the AMUCOEFF array.
invisc : int
is a flag. INVISC is read only if MT3DMUFLG is equal to zero.
@ -95,7 +95,7 @@ class SeawatVsc(Package):
is the fluid viscosity array read for each layer using the
MODFLOW-2000 U2DREL array reader. The VISC array is read only if
MT3DMUFLG is equal to zero. The VISC array may also be entered in
terms of solute concen- tration (or any other units) if INVISC is set
terms of solute concentration (or any other units) if INVISC is set
to 2, and the simple linear expression in item 3 can be used to
represent the relation to viscosity.
extension : string

View File

@ -1004,7 +1004,7 @@ class CellBudgetFile(object):
'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc.
full3D : boolean
If true, then return the record as a three dimensional numpy
array, even for those list-style records writen as part of a
array, even for those list-style records written as part of a
'COMPACT BUDGET' MODFLOW budget file. (Default is False.)
Returns
@ -1236,7 +1236,7 @@ class CellBudgetFile(object):
The zero-based record number. The first record is record 0.
full3D : boolean
If true, then return the record as a three dimensional numpy
array, even for those list-style records writen as part of a
array, even for those list-style records written as part of a
'COMPACT BUDGET' MODFLOW budget file. (Default is False.)
Returns

View File

@ -296,7 +296,7 @@ class check:
def _list_spd_check_violations(self, stress_period_data, criteria, col=None,
error_name='', error_type='Warning'):
"""If criteria contains any true values, return the error_type, package name, k,i,j indicies,
"""If criteria contains any true values, return the error_type, package name, k,i,j indices,
values, and description of error for each row in stress_period_data where criteria=True.
"""
inds_col = ['k', 'i', 'j'] if self.structured else ['node']
@ -363,7 +363,7 @@ class check:
Parameters
----------
include_cbd : boolean
If True, active is of same dimmension as the thickness array
If True, active is of same dimension as the thickness array
in the DIS module (includes quasi 3-D confining beds). Default False.
Returns
@ -407,7 +407,7 @@ class check:
def stress_period_data_values(self, stress_period_data, criteria, col=None,
error_name='', error_type='Warning'):
"""If criteria contains any true values, return the error_type, package name, k,i,j indicies,
"""If criteria contains any true values, return the error_type, package name, k,i,j indices,
values, and description of error for each row in stress_period_data where criteria=True.
"""
# check for valid cell indices
@ -480,7 +480,7 @@ class check:
if len(a) > 0:
t += ' {} {}s:\n'.format(len(a), etype)
if len(a) == 1:
t = t.replace('s', '') #grammer
t = t.replace('s', '') #grammar
for e in np.unique(desc):
n = np.sum(desc == e)
if n > 1:
@ -597,4 +597,4 @@ def get_neighbors(a):
tmp[1:-1, 2:, 1:-1].ravel(), # i+1
tmp[1:-1, 1:-1, :-2].ravel(), # j-1
tmp[1:-1, 1:-1, 2:].ravel()]) # j+1
return neighbors.reshape(6, nk, ni, nj)
return neighbors.reshape(6, nk, ni, nj)

View File

@ -3,7 +3,7 @@ import numpy as np
def area_of_polygon(x, y):
"""Calculates the signed area of an arbitrary polygon given its verticies
"""Calculates the signed area of an arbitrary polygon given its vertices
http://stackoverflow.com/a/4682656/190597 (Joe Kington)
http://softsurfer.com/Archive/algorithm_0101/algorithm_0101.htm#2D%20Polygons
"""

View File

@ -145,7 +145,7 @@ class FormattedLayerFile(LayerFile):
def _store_record(self, header, ipos):
"""
Store file header information in various formats for quick retreival
Store file header information in various formats for quick retrieval
"""
self.recordarray.append(header)
@ -176,7 +176,7 @@ class FormattedLayerFile(LayerFile):
current_row = 0
current_col = 0
result = np.empty((nrow, ncol), self.realtype)
# Loop until all data retreived or eof
# Loop until all data retrieved or eof
while (
current_row < nrow or current_col < ncol) and self.file.tell() != self.totalbytes:
line = self.file.readline()
@ -207,7 +207,7 @@ class FormattedLayerFile(LayerFile):
"""
current_col = 0
result = None
# Loop until data retreived or eof
# Loop until data retrieved or eof
while (
current_col < self.ncol - 1 or self.file.tell() == self.totalbytes) and current_col <= i:
line = self.file.readline()

View File

@ -23,7 +23,7 @@ class Polygon:
----------
exterior : (x, y, z) coordinates of exterior
interiors : tuple of (x, y, z) coordinates of each interior polygon
patch : descardes.PolygonPatch representation
patch : descartes.PolygonPatch representation
bounds : (xmin, ymin, xmax, ymax)
Tuple describing bounding box for polygon
geojson : dict

View File

@ -40,7 +40,7 @@ class Lgr(object):
xllp : float
x location of parent grid lower left corner
yllp : float
y locaiton of parent grid lower left corner
y location of parent grid lower left corner
"""

Some files were not shown because too many files have changed in this diff Show More